[CodeStyle] fix macos inconsistent-missing-override warnings and add -Werror (#47264)

* fix macos inconsistent-missing-override warnings

* fix inconsistent-missing-override error in test
GreatV authored Oct 24, 2022
1 parent 31f57f2 commit c5fe109
Showing 15 changed files with 27 additions and 26 deletions.
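For context, a minimal sketch (not taken from the Paddle sources; names are hypothetical) of what clang's -Winconsistent-missing-override diagnoses: within a single class, some overriding functions are marked override while another is not. With -Werror=inconsistent-missing-override, the flag added to cmake/flags.cmake below, the unmarked overrider becomes a build error, which is the pattern fixed throughout this commit (e.g. GradNodeAccumulation::name()).

// sketch.cc -- hypothetical example; compile with
//   clang++ -std=c++14 -Werror=inconsistent-missing-override -c sketch.cc
#include <string>

struct Base {
  virtual ~Base() = default;
  virtual void ClearTensorWrappers() {}
  virtual std::string name() { return "Base"; }
};

struct Derived : Base {
  void ClearTensorWrappers() override {}  // annotated overrider

  // Overrides Base::name() but is not marked 'override'; because the class
  // uses 'override' inconsistently, clang reports
  // inconsistent-missing-override here (an error under -Werror=...).
  std::string name() { return "Derived"; }

  // The fix applied by this commit's counterparts:
  // std::string name() override { return "Derived"; }
};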
3 changes: 2 additions & 1 deletion cmake/flags.cmake
@@ -214,7 +214,8 @@ if(APPLE)
CACHE STRING "Build architectures for OSX" FORCE)
endif()
# On Mac OS X register class specifier is deprecated and will cause warning error on latest clang 10.0
-set(COMMON_FLAGS -Wno-deprecated-register -Werror=format)
+set(COMMON_FLAGS -Wno-deprecated-register -Werror=format
+    -Werror=inconsistent-missing-override)
endif()

if(WITH_HETERPS AND WITH_PSLIB)
2 changes: 1 addition & 1 deletion paddle/fluid/eager/accumulation/accumulation_node.h
@@ -46,7 +46,7 @@ class GradNodeAccumulation : public GradNodeBase {

void ClearTensorWrappers() override { VLOG(5) << "Do nothing here now"; }

-std::string name() { return "GradNodeAccumulation"; }
+std::string name() override { return "GradNodeAccumulation"; }

/**
* Register ReduceHook
4 changes: 2 additions & 2 deletions paddle/fluid/eager/custom_operator/custom_operator_node.h
@@ -46,7 +46,7 @@ class RunCustomOpNode : public GradNodeBase {
bool is_new_grad = false) // NOLINT
override;

-std::string name() {
+std::string name() override {
return paddle::string::Sprintf("RunCustomOpNode: %s_grad", op_type_);
}

@@ -116,7 +116,7 @@ class RunCustomOpDoubleGradNode : public GradNodeBase {
bool is_new_grad = false) // NOLINT
override;

-std::string name() {
+std::string name() override {
return paddle::string::Sprintf("RunCustomOpDoubleGradNode: %s_grad_grad",
op_type_);
}
6 changes: 3 additions & 3 deletions paddle/fluid/framework/infershape_utils.h
@@ -59,9 +59,9 @@ class CompatMetaTensor : public phi::MetaTensor {

bool initialized() const override { return initialized_; };

-bool is_selected_rows() const;
-bool is_tensor_array() const;
-bool is_dense() const;
+bool is_selected_rows() const override;
+bool is_tensor_array() const override;
+bool is_dense() const override;

operator unspecified_bool_type() const override {
return initialized_ ? unspecified_bool_true : 0;
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/infer_var_type_context.h
@@ -68,12 +68,12 @@ class RuntimeInferVarTypeContext : public framework::InferVarTypeContext {
return (it != outputs_.end() && it->second.size() > 0);
}

-size_t InputSize(const std::string& name) const {
+size_t InputSize(const std::string& name) const override {
return inputs_.at(name).size();
}

const std::string& InputVarName(const std::string& name,
-const int index = 0) const {
+const int index = 0) const override {
return GetNameFromVar(inputs_.at(name)[index]);
}

6 changes: 3 additions & 3 deletions paddle/fluid/inference/api/analysis_predictor.h
@@ -143,13 +143,13 @@ class AnalysisPredictor : public PaddlePredictor {
///
/// \return input names
///
-std::vector<std::string> GetInputNames();
+std::vector<std::string> GetInputNames() override;
///
/// \brief Get the output names
///
/// \return output names
///
-std::vector<std::string> GetOutputNames();
+std::vector<std::string> GetOutputNames() override;

///
/// \brief Get the Input Tensor object
@@ -227,7 +227,7 @@ class AnalysisPredictor : public PaddlePredictor {
/// \brief Clear the intermediate tensors of the predictor
///
///
-void ClearIntermediateTensor();
+void ClearIntermediateTensor() override;

///
/// \brief Release all tmp tensor to compress the size of the memory pool.
@@ -33,7 +33,7 @@ class RecordedAllocator : public Allocator {
return new Allocation(malloc(size), size, platform::CPUPlace());
}

-void FreeImpl(phi::Allocation *allocation) {
+void FreeImpl(phi::Allocation *allocation) override {
allocated_size_ -= allocation->size();
free(allocation->ptr());
delete allocation;
@@ -88,7 +88,7 @@ class LimitedResourceAllocator : public Allocator {
return new Allocation(malloc(size), size, platform::CPUPlace());
}

-void FreeImpl(phi::Allocation *allocation) {
+void FreeImpl(phi::Allocation *allocation) override {
allocated_size_ -= allocation->size();
free(allocation->ptr());
delete allocation;
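The second bullet of the commit message covers test-only code like the allocators above: because -Werror=inconsistent-missing-override goes into the common flags, test fixtures are built with it too. A hedged sketch of the pattern, using simplified stand-in types rather than the real phi::Allocation/Allocator interfaces:

#include <cstddef>
#include <cstdlib>

// Hypothetical, simplified stand-ins for the allocator interfaces.
struct Allocation {
  void* ptr;
  std::size_t size;
};

struct Allocator {
  virtual ~Allocator() = default;
  virtual Allocation* AllocateImpl(std::size_t size) = 0;
  virtual void FreeImpl(Allocation* allocation) = 0;
};

struct RecordedAllocator : Allocator {
  Allocation* AllocateImpl(std::size_t size) override {
    allocated_size_ += size;
    return new Allocation{std::malloc(size), size};
  }
  // Before the fix this overrider lacked 'override'; with the annotated
  // AllocateImpl above, clang flags the inconsistency as an error.
  void FreeImpl(Allocation* allocation) override {
    allocated_size_ -= allocation->size;
    std::free(allocation->ptr);
    delete allocation;
  }
  std::size_t allocated_size_ = 0;
};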
6 changes: 3 additions & 3 deletions paddle/fluid/operators/cross_entropy_op.cc
@@ -346,14 +346,14 @@ class CrossEntropyGradientOp2 : public CrossEntropyGradientOpBase {
}

protected:
-virtual framework::DDim GetXDim(framework::InferShapeContext* ctx) const {
+framework::DDim GetXDim(framework::InferShapeContext* ctx) const override {
auto x_shape = ctx->GetInputDim("XShape");
return framework::DDim(x_shape.Get(), x_shape.size() - 1);
}

-virtual const char* VarNameWithXLoD() const { return "XShape"; }
+const char* VarNameWithXLoD() const override { return "XShape"; }

-virtual bool IsSoftLabel(framework::InferShapeContext* ctx) const {
+bool IsSoftLabel(framework::InferShapeContext* ctx) const override {
return false;
}
};
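The hunk above also drops the redundant virtual keyword from the overriders. An overriding function is implicitly virtual, so repeating virtual adds nothing, whereas override documents the intent and makes the compiler check it. A minimal sketch with generic names (not the actual Paddle operator classes):

struct GradOpBase {
  virtual ~GradOpBase() = default;
  virtual bool IsSoftLabel() const { return true; }
};

// Before: 'virtual' repeated on the overrider; compiles, but asserts nothing.
struct GradOpBefore : GradOpBase {
  virtual bool IsSoftLabel() const { return false; }
};

// After: still implicitly virtual, and the compiler now rejects the
// declaration if it ever stops matching a base-class virtual function.
struct GradOpAfter : GradOpBase {
  bool IsSoftLabel() const override { return false; }
};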
2 changes: 1 addition & 1 deletion paddle/fluid/operators/decode_jpeg_op.cc
@@ -41,7 +41,7 @@ class DecodeJpegOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
if (var_name == "X") {
return expected_kernel_type;
}
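Annotating GetKernelTypeForVar here (and in the operator hunks that follow) is more than style: without override, a later signature drift such as a dropped const or a changed parameter type would silently declare a new, unrelated function that hides the base one, and callers going through the base class would keep using the base implementation. A hedged illustration with simplified types, not the real framework::OperatorWithKernel signature:

#include <string>

struct KernelType {
  int device;  // 0 = CPU, 1 = GPU in this toy model
};

struct OperatorBase {
  virtual ~OperatorBase() = default;
  virtual KernelType GetKernelTypeForVar(const std::string& var_name,
                                         const KernelType& expected) const {
    (void)var_name;
    return expected;
  }
};

struct DecodeOp : OperatorBase {
  // If this declaration drifted from the base signature (say, losing the
  // trailing 'const'), it would quietly become a separate function.
  // 'override' turns that mistake into a compile error on this line.
  KernelType GetKernelTypeForVar(const std::string& var_name,
                                 const KernelType& expected) const override {
    if (var_name == "X") {
      return expected;
    }
    return KernelType{1};
  }
};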
2 changes: 1 addition & 1 deletion paddle/fluid/operators/elementwise/elementwise_div_op.h
@@ -51,7 +51,7 @@ class ElementwiseDivOpDoubleGrad : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
2 changes: 1 addition & 1 deletion paddle/fluid/operators/elementwise/elementwise_mul_op.h
@@ -38,7 +38,7 @@ class ElementwiseMulOp : public ElementwiseOp {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
6 changes: 3 additions & 3 deletions paddle/fluid/operators/elementwise/elementwise_op.h
@@ -357,7 +357,7 @@ class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
@@ -409,7 +409,7 @@ class ElementwiseOpDoubleGradWithoutDXDY
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
@@ -461,7 +461,7 @@ class ElementwiseOpTripleGrad : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (framework::IsComplexType(expected_kernel_type.data_type_)) {
// only promote inputs’s types when contains complex input
return framework::OpKernelType(
2 changes: 1 addition & 1 deletion paddle/fluid/operators/inplace_abn_op.cc
@@ -73,7 +73,7 @@ class InplaceABNGradOp : public paddle::operators::BatchNormGradOp {
public:
using paddle::operators::BatchNormGradOp::BatchNormGradOp;

-void InferShape(framework::InferShapeContext* ctx) const {
+void InferShape(framework::InferShapeContext* ctx) const override {
// check input
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "InplaceABNGrad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")),
2 changes: 1 addition & 1 deletion paddle/fluid/operators/optimizers/sgd_op.cc
@@ -60,7 +60,7 @@ class SGDOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string &var_name,
const phi::DenseTensor &tensor,
-const framework::OpKernelType &expected_kernel_type) const {
+const framework::OpKernelType &expected_kernel_type) const override {
if (var_name == "LearningRate") {
return framework::OpKernelType(
framework::TransToProtoVarType(tensor.dtype()),
2 changes: 1 addition & 1 deletion paddle/fluid/operators/pad2d_op.cc
@@ -718,7 +718,7 @@ class Pad2dOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
-const framework::OpKernelType& expected_kernel_type) const {
+const framework::OpKernelType& expected_kernel_type) const override {
#ifdef PADDLE_WITH_MKLDNN
if ((expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
(tensor.layout() != phi::DataLayout::kMKLDNN)) {
