Add broadcasting support (e.g. matrix-vector) for cos sim operator. #3918

Merged
merged 3 commits on Sep 14, 2017
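This change lets the 1st dimension of Input(Y) be 1, in which case Y is broadcast along the 1st dimension of Input(X) before the row-wise cosine similarity is computed; all remaining dimensions still have to match. A minimal numpy sketch of the intended forward semantics (shapes are for illustration only):

import numpy as np

# Illustrative shapes: X is (6, 5); Y is either (6, 5) or, with broadcasting, (1, 5).
x = np.random.rand(6, 5).astype("float32")
y = np.random.rand(1, 5).astype("float32")            # one row, broadcast over the 6 rows of x

x_norm = np.linalg.norm(x, axis=1, keepdims=True)      # (6, 1) -> XNorm
y_norm = np.linalg.norm(y, axis=1, keepdims=True)      # (1, 1) -> YNorm
out = (x * y).sum(axis=1, keepdims=True) / x_norm / y_norm   # (6, 1) -> Out

For inputs with rank greater than 2, everything after the 1st dimension is flattened into a single vector per row, which is what the new tests do with axis=(1, 2).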
95 changes: 69 additions & 26 deletions paddle/operators/cos_sim_op.cc
@@ -25,33 +25,58 @@ class CosSimOp : public framework::OperatorWithKernel {

protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// notnull check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
ctx.Input<Tensor>("Y")->dims(),
"Dimensions of Input(X) and Input(Y) must be the same.");

auto dims = ctx.Input<Tensor>("X")->dims();
ctx.Output<Tensor>("Out")->Resize({dims[0], 1});
ctx.Output<Tensor>("XNorm")->Resize({dims[0], 1});
ctx.Output<Tensor>("YNorm")->Resize({dims[0], 1});

// shape check
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();

PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
"Ranks of Input(X) and Input(Y) must be equal.");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"Rank of Input(X) must not be less than 2.");
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
framework::slice_ddim(y_dims, 1, y_dims.size()),
"All dimensions except the 1st of Input(X) and Input(Y) "
"must be equal.");
PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
"The 1st dimension of Input(Y) must be equal to Input(X) or"
" just 1 (which will be broadcasted to match Input(X)).");

// resize tensor
ctx.Output<Tensor>("Out")->Resize({x_dims[0], 1});
ctx.Output<Tensor>("XNorm")->Resize({x_dims[0], 1});
ctx.Output<Tensor>("YNorm")->Resize({y_dims[0], 1});
}
};

class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
public:
CosSimOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The first input of cos_sim op.");
AddInput("Y", "The second input of cos_sim op.");
AddInput("X", "The 1st input of cos_sim op.");
AddInput("Y", "The 2nd input of cos_sim op.");
AddOutput("Out", "The output of cos_sim op.");
AddOutput("XNorm", "Row norm of the first input.").AsIntermediate();
AddOutput("YNorm", "Row norm of the second input.").AsIntermediate();
AddOutput("XNorm",
"Norm of the first input, reduced along the 1st "
"dimension.")
.AsIntermediate();
AddOutput("YNorm",
"Norm of the second input, reduced along the 1st "
"dimension.")
.AsIntermediate();

AddComment(R"DOC(
Cosine Similarity Operator.

The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y))
The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).

Input(X) and Input(Y) must have the same shape, except that the 1st dimension
of Input(Y) could be just 1 (different from Input(X)), which will be
broadcasted to match the shape of Input(X) before computing their cosine
similarity.
)DOC");
}
};
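Written per row (with everything after the 1st dimension of each input flattened into a single vector), the computation described above is:

Out[i] = X[i] . Y[j] / (||X[i]|| * ||Y[j]||), where j = i when Input(Y) has the same 1st dimension as Input(X), and j = 0 (the single row of Y) when the 1st dimension of Input(Y) is 1.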
@@ -62,32 +87,50 @@ class CosSimOpGrad : public framework::OperatorWithKernel {

protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// notnull check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"),
"Input(XNorm) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"),
"Input(YNorm) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"),
"Input(Out) must not be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) must not be null.");

// shape check
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
auto xnorm_dims = ctx.Input<Tensor>("XNorm")->dims();
auto ynorm_dims = ctx.Input<Tensor>("YNorm")->dims();
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
PADDLE_ENFORCE_EQ(x_dims, y_dims,
"Dimensions of Input(X) and Input(Y) must be the same.");
PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0],
"1st dimension of XNorm must equal that of Input(X).");
PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2nd dimension of XNorm must be one.");
PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0],
"1st dimension of YNorm must equal that of Input(Y).");
PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2nd dimension of YNorm must be one.");
PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
"1st dimension of Out@GRAD must equal that of Input(X)");
PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one.");

auto out_dims = ctx.Input<Tensor>("Out")->dims();
auto out_grad_dims =
ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();

PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
"Ranks of Input(X) and Input(Y) must be equal.");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"Rank of Input(X) must not be less than 2.");
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
framework::slice_ddim(y_dims, 1, y_dims.size()),
"All dimensions except the 1st of Input(X) and Input(Y) "
"must be equal.");
PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
"The 1st dimension of Input(Y) must be equal to Input(X) or"
" just 1 (which will be broadcasted to match Input(X)).");
auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1});
auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1});
PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims,
"Shape of Input(XNorm) must be [X.Dim(0), 1].");
PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims,
"Shape of Input(YNorm) must be [Y.Dim(0), 1].");
PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims,
"Shape of Input(Out) must be [X.Dim(0), 1].");
PADDLE_ENFORCE_EQ(out_grad_dims, target_xnorm_dims,
"Shape of Input(Out@Grad) must be [X.Dim(0), 1].");

// resize tensor
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
if (x_grad) x_grad->Resize(x_dims);
147 changes: 92 additions & 55 deletions paddle/operators/cos_sim_op.h
@@ -31,74 +31,111 @@ template <typename Place, typename T>
class CosSimKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input_x = context.Input<Tensor>("X");
auto* input_y = context.Input<Tensor>("Y");
auto* output_z = context.Output<Tensor>("Out");
auto* output_x_norm = context.Output<Tensor>("XNorm");
auto* output_y_norm = context.Output<Tensor>("YNorm");
// get Tensor
auto* in_x = context.Input<Tensor>("X");
auto* in_y = context.Input<Tensor>("Y");
auto* out_z = context.Output<Tensor>("Out");
auto* out_x_norm = context.Output<Tensor>("XNorm");
auto* out_y_norm = context.Output<Tensor>("YNorm");
out_z->mutable_data<T>(context.GetPlace());
out_x_norm->mutable_data<T>(context.GetPlace());
out_y_norm->mutable_data<T>(context.GetPlace());

output_z->mutable_data<T>(context.GetPlace());
output_x_norm->mutable_data<T>(context.GetPlace());
output_y_norm->mutable_data<T>(context.GetPlace());

auto dims = input_x->dims();
int64_t size = input_x->numel();
auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
auto x = EigenMatrix<T>::From(*input_x, new_dims);
auto y = EigenMatrix<T>::From(*input_y, new_dims);
auto z = EigenVector<T>::Flatten(*output_z);
auto x_norm = EigenVector<T>::Flatten(*output_x_norm);
auto y_norm = EigenVector<T>::Flatten(*output_y_norm);
// convert Tensor to Eigen Tensor
int rows_x = in_x->dims()[0];
int rows_y = in_y->dims()[0];
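// Reshape(t, 1) keeps the 1st dimension as rows and flattens all remaining dimensions
// into a single column dimension (matching the old make_ddim({dims[0], size / dims[0]}) reshape).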
auto x = EigenMatrix<T>::Reshape(*in_x, 1);
auto y = EigenMatrix<T>::Reshape(*in_y, 1);
auto z = EigenVector<T>::Flatten(*out_z);
auto x_norm = EigenVector<T>::Flatten(*out_x_norm);
auto y_norm = EigenVector<T>::Flatten(*out_y_norm);

// compute
auto place = context.GetEigenDevice<Place>();
auto xy = (x * y).sum(Eigen::array<int, 1>({{1}}));
x_norm.device(place) = x.square().sum(Eigen::array<int, 1>({{1}})).sqrt();
y_norm.device(place) = y.square().sum(Eigen::array<int, 1>({{1}})).sqrt();
z.device(place) = xy / x_norm / y_norm;
auto row_along = Eigen::array<int, 1>({{1}});
x_norm.device(place) = x.square().sum(row_along).sqrt();
y_norm.device(place) = y.square().sum(row_along).sqrt();
if (rows_x == rows_y) {
auto xy = (x * y).sum(Eigen::array<int, 1>({1}));
z.device(place) = xy / x_norm / y_norm;
} else {
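// rows_y == 1: broadcast the single row of y across all rows_x rows of x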
Eigen::DSizes<int, 2> bcast(rows_x, 1);
Review comment (Contributor): better change bcast to bcast_dims?
Reply (Contributor Author): to bcast_rows
auto xy = (x * y.broadcast(bcast)).sum(row_along);
z.device(place) = xy / x_norm / y_norm.broadcast(bcast);
}
}
};

template <typename Place, typename T>
class CosSimGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input_x = context.Input<Tensor>("X");
auto* input_y = context.Input<Tensor>("Y");
auto* input_z = context.Input<Tensor>("Out");
auto* input_x_norm = context.Input<Tensor>("XNorm");
auto* input_y_norm = context.Input<Tensor>("YNorm");
auto* output_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
auto* input_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
// get Tensor
auto* in_x = context.Input<Tensor>("X");
auto* in_y = context.Input<Tensor>("Y");
auto* in_z = context.Input<Tensor>("Out");
auto* in_x_norm = context.Input<Tensor>("XNorm");
auto* in_y_norm = context.Input<Tensor>("YNorm");
auto* out_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* out_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
auto* in_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));

auto dims = input_x->dims();
int64_t size = input_x->numel();
auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
auto x = EigenMatrix<T>::From(*input_x, new_dims);
auto y = EigenMatrix<T>::From(*input_y, new_dims);
auto z = EigenMatrix<T>::From(*input_z);
auto x_norm = EigenMatrix<T>::From(*input_x_norm);
auto y_norm = EigenMatrix<T>::From(*input_y_norm);
auto dz = EigenMatrix<T>::From(*input_grad_z);
// convert Tensor to Eigen Tensor
auto x = EigenMatrix<T>::Reshape(*in_x, 1);
auto y = EigenMatrix<T>::Reshape(*in_y, 1);
auto z = EigenMatrix<T>::Reshape(*in_z, 1);
auto x_norm = EigenMatrix<T>::Reshape(*in_x_norm, 1);
auto y_norm = EigenMatrix<T>::Reshape(*in_y_norm, 1);
auto dz = EigenMatrix<T>::Reshape(*in_grad_z, 1);

Eigen::DSizes<int, 2> bcast(1, new_dims[1]);
auto z_bcast = z.broadcast(bcast);
auto dz_bcast = dz.broadcast(bcast);
// compute gradient
int rows_x = in_x->dims()[0];
int rows_y = in_y->dims()[0];
int cols = framework::product(in_x->dims()) / rows_x;
Eigen::DSizes<int, 2> bcast_cols(1, cols);
auto z_bcast = z.broadcast(bcast_cols);
auto dz_bcast = dz.broadcast(bcast_cols);
auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast_cols);
auto place = context.GetEigenDevice<Place>();
auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast);
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast);
auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast);
if (output_grad_x) {
output_grad_x->mutable_data<T>(context.GetPlace());
auto dx = EigenMatrix<T>::From(*output_grad_x, new_dims);
dx.device(place) =
dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast);
}
if (output_grad_y) {
output_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::From(*output_grad_y, new_dims);
dy.device(place) =
dz_bcast * (x / norm_prod_bcast - z_bcast * y / y_snorm_bcast);
if (rows_x == rows_y) {
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_cols);
auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast_cols);
// compute dx
if (out_grad_x) {
out_grad_x->mutable_data<T>(context.GetPlace());
auto dx = EigenMatrix<T>::Reshape(*out_grad_x, 1);
auto grad = y / norm_prod_bcast - z_bcast * x / x_snorm_bcast;
dx.device(place) = dz_bcast * grad;
}
// compute dy
if (out_grad_y) {
out_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast;
dy.device(place) = dz_bcast * grad;
}
} else {
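// y was broadcast across the rows of x in the forward pass, so its gradient
// must be accumulated (summed) back over that broadcast row dimension below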
Eigen::DSizes<int, 2> bcast_rows(rows_x, 1);
Eigen::DSizes<int, 2> bcast_rows_cols(rows_x, cols);
auto y_bcast = y.broadcast(bcast_rows);
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols);
auto norm_prod_bcast = (x_norm * y_norm.eval().broadcast(bcast_rows))
.eval()
.broadcast(bcast_cols);
// compute dx
if (out_grad_x) {
out_grad_x->mutable_data<T>(context.GetPlace());
auto dx = EigenMatrix<T>::Reshape(*out_grad_x, 1);
auto grad = y_bcast / norm_prod_bcast - z_bcast * x / x_snorm_bcast;
dx.device(place) = dz_bcast * grad;
}
// compute dy
if (out_grad_y) {
out_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast;
dy.device(place) = (dz_bcast * grad).sum(Eigen::array<int, 1>({0}));
}
}
}
};
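In the broadcast branch above, the gradient w.r.t. Y is the per-row gradient summed back over the broadcast (row) dimension. A small numpy sketch (shapes and variable names are illustrative only) that checks this reduction against a numerical gradient:

import numpy as np

# X: (6, 5), Y: (1, 5) broadcast over X's rows, dz: upstream gradient of Out.
rng = np.random.RandomState(0)
x = rng.rand(6, 5)
y = rng.rand(1, 5)
dz = rng.rand(6, 1)

x_norm = np.linalg.norm(x, axis=1, keepdims=True)        # (6, 1)
y_norm = np.linalg.norm(y, axis=1, keepdims=True)        # (1, 1)
z = (x * y).sum(axis=1, keepdims=True) / (x_norm * y_norm)

# Analytic dY: per-row gradient, summed over the broadcast (row) axis,
# mirroring the .sum over dimension 0 in CosSimGradKernel above.
dy = (dz * (x / (x_norm * y_norm) - z * y / y_norm ** 2)).sum(axis=0, keepdims=True)

# Numerical gradient of sum(dz * Out) w.r.t. Y, for comparison.
eps = 1e-6
dy_num = np.zeros_like(y)
for j in range(y.shape[1]):
    for sign in (1.0, -1.0):
        yp = y.copy()
        yp[0, j] += sign * eps
        zp = (x * yp).sum(axis=1, keepdims=True) / (
            x_norm * np.linalg.norm(yp, axis=1, keepdims=True))
        dy_num[0, j] += sign * (dz * zp).sum() / (2 * eps)

print(np.allclose(dy, dy_num))   # expected: True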
64 changes: 59 additions & 5 deletions python/paddle/v2/framework/tests/test_cos_sim_op.py
@@ -7,8 +7,8 @@ class TestCosSimOp(OpTest):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((10, 5)).astype("float32"),
'Y': np.random.random((10, 5)).astype("float32")
'X': np.random.random((6, 5)).astype("float32"),
'Y': np.random.random((6, 5)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -28,12 +28,66 @@ def test_check_grad_normal(self):

def test_check_grad_ingore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X"))

def test_check_grad_ignore_y(self):
def test_check_grad_ingore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))


if __name__ == "__main__":
class TestCosSimOp2(TestCosSimOp):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5)).astype("float32"),
'Y': np.random.random((1, 5)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
'Out': np.expand_dims(expect_out, 1)
}


class TestCosSimOp3(TestCosSimOp):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5, 2)).astype("float32"),
'Y': np.random.random((6, 5, 2)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
'Out': np.expand_dims(expect_out, 1)
}


class TestCosSimOp4(TestCosSimOp):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5, 2)).astype("float32"),
'Y': np.random.random((1, 5, 2)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
'Out': np.expand_dims(expect_out, 1)
}


if __name__ == '__main__':
unittest.main()