Change Op::GetAttr to Op::Attr #3903

Merged

Changes from 1 commit
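This PR is a mechanical rename: the attribute accessor OperatorBase::GetAttr<T> becomes OperatorBase::Attr<T>, and every call site (the InferShapeContext forwarding helper, op kernels, and InferShape implementations) is updated to match. A minimal before/after sketch of a call site, using the "scale" attribute from the tests below (the snippet is illustrative, not part of the diff):

    // Before this PR: read a float attribute through GetAttr.
    float scale = op->GetAttr<float>("scale");

    // After this PR: same lookup, shorter name.
    float scale = op->Attr<float>("scale");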
6 changes: 3 additions & 3 deletions paddle/framework/op_registry_test.cc
@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) {
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
-  float scale_get = op->GetAttr<float>("scale");
+  float scale_get = op->Attr<float>("scale");
   ASSERT_EQ(scale_get, scale);
 }

@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) {
   paddle::framework::Scope scope;
   paddle::platform::CPUDeviceContext dev_ctx;
   op->Run(scope, dev_ctx);
-  ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
+  ASSERT_EQ(op->Attr<float>("scale"), 1.0);
 }

 TEST(OpRegistry, CustomChecker) {
@@ -172,6 +172,6 @@ TEST(OpRegistry, CustomChecker) {
   paddle::platform::CPUDeviceContext dev_ctx;
   paddle::framework::Scope scope;
   op->Run(scope, dev_ctx);
-  int test_attr = op->GetAttr<int>("test_attr");
+  int test_attr = op->Attr<int>("test_attr");
   ASSERT_EQ(test_attr, 4);
 }
6 changes: 3 additions & 3 deletions paddle/framework/operator.h
@@ -69,7 +69,7 @@ class OperatorBase {
   virtual ~OperatorBase() {}

   template <typename T>
-  inline const T& GetAttr(const std::string& name) const {
+  inline const T& Attr(const std::string& name) const {
     PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap",
                    name);
     return boost::get<T>(attrs_.at(name));
@@ -238,8 +238,8 @@ class InferShapeContext {
   const Scope& scope() const { return scope_; }

   template <typename T>
-  inline const T& GetAttr(const std::string& name) const {
-    return op_.GetAttr<T>(name);
+  inline const T& Attr(const std::string& name) const {
+    return op_.Attr<T>(name);
   }

   size_t InputSize(const std::string& name) const {
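For reference, the renamed accessor is a thin wrapper over the boost::variant-based AttributeMap: it enforces that the attribute exists, then unwraps the variant with boost::get<T>. A standalone sketch of the same lookup pattern, with simplified stand-ins for the framework's types (assumed, not taken from this diff):

    #include <map>
    #include <stdexcept>
    #include <string>
    #include <boost/variant.hpp>

    // Simplified stand-ins for the framework's Attribute/AttributeMap.
    using Attribute = boost::variant<int, float, std::string>;
    using AttributeMap = std::map<std::string, Attribute>;

    template <typename T>
    const T& Attr(const AttributeMap& attrs, const std::string& name) {
      // Mirrors the PADDLE_ENFORCE check: fail loudly when the attribute is absent.
      if (attrs.count(name) == 0) {
        throw std::runtime_error(name + " should be in AttributeMap");
      }
      // boost::get<T> throws boost::bad_get if T does not match the stored type.
      return boost::get<T>(attrs.at(name));
    }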
8 changes: 4 additions & 4 deletions paddle/operators/gaussian_random_op.cc
@@ -19,12 +19,12 @@ template <typename T>
 class CPUGaussianRandomKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    float mean = context.GetAttr<float>("mean");
-    float std = context.GetAttr<float>("std");
+    float mean = context.Attr<float>("mean");
+    float std = context.Attr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());

-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
@@ -45,7 +45,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
-    auto dims = GetAttr<std::vector<int>>("dims");
+    auto dims = Attr<std::vector<int>>("dims");
     PADDLE_ENFORCE(dims.size() > 0UL,
                    "dims can be one int or array. dims must be set.");
     tensor->Resize(framework::make_ddim(dims));
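Aside from the rename, the hunk above shows the seeding convention these random ops follow: the seed attribute is an int, and a value of 0 means "draw a fresh seed from std::random_device". A standalone sketch of that convention (illustrative only):

    #include <random>

    // Seed an engine the way the random-op kernels do: a non-zero seed gives
    // reproducible output; seed == 0 requests a nondeterministic seed.
    std::minstd_rand MakeSeededEngine(unsigned int seed) {
      if (seed == 0) {
        seed = std::random_device()();
      }
      std::minstd_rand engine;
      engine.seed(seed);
      return engine;
    }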
4 changes: 2 additions & 2 deletions paddle/operators/gaussian_random_op.cu
@@ -47,8 +47,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel {
       std::random_device rd;
       seed = rd();
     }
-    T mean = static_cast<T>(context.GetAttr<float>("mean"));
-    T std = static_cast<T>(context.GetAttr<float>("std"));
+    T mean = static_cast<T>(context.Attr<float>("mean"));
+    T std = static_cast<T>(context.Attr<float>("std"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);
     ssize_t N = framework::product(tensor->dims());
     thrust::transform(index_sequence_begin, index_sequence_begin + N,
8 changes: 4 additions & 4 deletions paddle/operators/rnn/recurrent_op_utils.cc
@@ -109,7 +109,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   arg->step_scopes = op.Output(name.step_scopes);

   auto inlinks = op.Inputs(name.inlinks);
-  auto inlink_alias = op.GetAttr<std::vector<std::string>>(name.inlink_alias);
+  auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
   PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
                  "the size of inlinks and inlink_alias don't match:%d,%d",
                  inlinks.size(), inlink_alias.size());
@@ -121,7 +121,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   }

   auto outlinks = op.Outputs(name.outlinks);
-  auto outlink_alias = op.GetAttr<std::vector<std::string>>(name.outlink_alias);
+  auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
   PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
                  "the size of outlinks and outlink_alias don't match:%d,%d",
                  outlinks.size(), outlink_alias.size());
@@ -135,8 +135,8 @@ void InitArgument(const ArgumentName& name, Argument* arg,
   auto boot_memories = op.Inputs(name.boot_memories);

   // attributes
-  auto memories = op.GetAttr<std::vector<std::string>>(name.memories);
-  auto pre_memories = op.GetAttr<std::vector<std::string>>(name.pre_memories);
+  auto memories = op.Attr<std::vector<std::string>>(name.memories);
+  auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);

   PADDLE_ENFORCE(memories.size() == boot_memories.size(),
                  "the size of memories, boot_memories don't match:%d,%d",
2 changes: 1 addition & 1 deletion paddle/operators/scale_op.cc
@@ -60,7 +60,7 @@ class ScaleGradOp : public NetOp {
     AppendOp(framework::OpRegistry::CreateOp(
         "scale", {{"X", {Input(framework::GradVarName("Out"))}}},
         {{"Out", {Output(framework::GradVarName("X"))}}},
-        {{"scale", GetAttr<AttrType>("scale")}}));
+        {{"scale", Attr<AttrType>("scale")}}));
     CompleteAddOp(false);
   }
 };
2 changes: 1 addition & 1 deletion paddle/operators/scale_op.h
@@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel {
     auto* in = context.Input<framework::Tensor>("X");
     tensor->mutable_data<T>(in->place());

-    auto scale = static_cast<T>(context.GetAttr<AttrType>("scale"));
+    auto scale = static_cast<T>(context.Attr<AttrType>("scale"));

     auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
2 changes: 1 addition & 1 deletion paddle/operators/sgd_op.h
@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel {
     auto param = ctx.Input<Tensor>("param");
     auto grad = ctx.Input<Tensor>("grad");
     auto param_out = ctx.Output<Tensor>("param_out");
-    float lr = ctx.GetAttr<float>("learning_rate");
+    float lr = ctx.Attr<float>("learning_rate");

     param_out->mutable_data<T>(ctx.GetPlace());
10 changes: 5 additions & 5 deletions paddle/operators/uniform_random_op.cc
@@ -26,15 +26,15 @@ class CPUUniformRandomKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());
-    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
     std::minstd_rand engine;
     if (seed == 0) {
       seed = std::random_device()();
     }
     engine.seed(seed);
     std::uniform_real_distribution<T> dist(
-        static_cast<T>(context.GetAttr<float>("min")),
-        static_cast<T>(context.GetAttr<float>("max")));
+        static_cast<T>(context.Attr<float>("min")),
+        static_cast<T>(context.Attr<float>("max")));
     ssize_t size = framework::product(tensor->dims());
     for (ssize_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
@@ -48,10 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {

  protected:
   void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
+    PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
                    "uniform_random's min must less then max");
     auto* tensor = ctx.Output<framework::Tensor>("Out");
-    auto dims = GetAttr<std::vector<int>>("dims");
+    auto dims = Attr<std::vector<int>>("dims");
     tensor->Resize(framework::make_ddim(dims));
   }
 };
4 changes: 2 additions & 2 deletions paddle/operators/uniform_random_op.cu
@@ -50,8 +50,8 @@ class GPUUniformRandomKernel : public framework::OpKernel {
       std::random_device rd;
       seed = rd();
     }
-    T min = static_cast<T>(context.GetAttr<float>("min"));
-    T max = static_cast<T>(context.GetAttr<float>("max"));
+    T min = static_cast<T>(context.Attr<float>("min"));
+    T max = static_cast<T>(context.Attr<float>("max"));
     thrust::counting_iterator<unsigned int> index_sequence_begin(0);
     ssize_t N = framework::product(tensor->dims());
     thrust::transform(index_sequence_begin, index_sequence_begin + N,
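The GPU kernels pair the renamed accessor with a thrust::transform over a counting iterator, so each output index derives its random value independently. A self-contained sketch of that pattern for the uniform case, compiled with nvcc (the functor and names are assumptions for illustration, not code from this PR):

    #include <thrust/device_vector.h>
    #include <thrust/iterator/counting_iterator.h>
    #include <thrust/random.h>
    #include <thrust/transform.h>

    // One independent draw per index: seed an engine, skip ahead n draws,
    // then sample. This mirrors the counting-iterator pattern above.
    struct UniformGenerator {
      float min_, max_;
      unsigned int seed_;
      UniformGenerator(float min, float max, unsigned int seed)
          : min_(min), max_(max), seed_(seed) {}
      __host__ __device__ float operator()(unsigned int n) const {
        thrust::minstd_rand rng(seed_);
        thrust::uniform_real_distribution<float> dist(min_, max_);
        rng.discard(n);
        return dist(rng);
      }
    };

    int main() {
      const int N = 8;
      thrust::device_vector<float> data(N);
      thrust::counting_iterator<unsigned int> begin(0);
      thrust::transform(begin, begin + N, data.begin(),
                        UniformGenerator(0.0f, 1.0f, 42u));
      return 0;
    }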