[CodeStyle][CINN] fix cinn cpplint codestyle #55006

Merged: 3 commits, Jun 30, 2023

Changes from all commits:
@@ -38,7 +38,7 @@ TEST(CostModel, Basic) {
       std::vector<float>(feature_size));
   for (int i = 0; i < batch_size; ++i) {
     for (int j = 0; j < feature_size; ++j) {
-      samples[i][j] = rand() % 10;
+      samples[i][j] = rand() % 10;  // NOLINT
     }
   }

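The // NOLINT markers in this hunk and the ones below silence cpplint's runtime/threadsafe_fn warning on rand() without touching runtime behavior. A minimal sketch of what the warning is steering toward, assuming a local <random> engine would be acceptable in these tests (the engine and distribution below are illustrative, not part of this PR):

#include <random>

// Illustrative alternative to rand() % 10: a seeded engine plus a bounded
// distribution, which cpplint's threadsafe_fn check does not flag.
std::mt19937 rng(std::random_device{}());
std::uniform_int_distribution<int> dist(0, 9);
int sample = dist(rng);  // uniform in [0, 9]
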
@@ -37,7 +37,7 @@ void AutoGenRule::ApplyRandomly() {
   CHECK_GT(num_applicable_, 0)
       << "Call " << GetRuleName()
       << "::ApplyRandomly() with NumberApplicable() == 0";
-  int index = rand() % num_applicable_;
+  int index = rand() % num_applicable_;  // NOLINT
   return Apply(index);
 }

@@ -90,9 +90,9 @@ class MultiLevelTiling : public AutoGenRule {
     if (candidates.size() == 0) {
       return {1, T(extent)};
     }
-    int index = rand() % candidates.size();
+    int index = rand() % candidates.size();  // NOLINT
     std::vector<T> pick = candidates[index];
-    if (rand() % 2 != 0) {
+    if (rand() % 2 != 0) {  // NOLINT
       T tmp = pick[0];
       pick[0] = pick[1];
       pick[1] = tmp;
@@ -52,7 +52,8 @@ TEST(MultiLevelTile, SampleSplitTwo) {
       target, MultiLevelTiling::kConfigs.at(target.arch));

   for (int i = 0; i < 100; ++i) {
-    size_t number_to_split = rand() % 65535 + 2;  // random number in [2, 2^16]
+    size_t number_to_split =
+        rand() % 65535 + 2;  // NOLINT, random number in [2, 2^16]
     std::vector<size_t> split =
         multi_level_tiling.SampleSplitTwo<size_t>(number_to_split);
     EXPECT_EQ(split.size(), 2UL);
@@ -73,8 +74,9 @@ TEST(MultiLevelTile, SampleTileSplit) {
       target, MultiLevelTiling::kConfigs.at(target.arch));

   for (int i = 0; i < 100; ++i) {
-    int number_to_split = rand() % 65535 + 2;  // random number in [2, 2^16]
-    int split_size = rand() % 5 + 1;  // random in [1, 5]
+    int number_to_split =
+        rand() % 65535 + 2;  // NOLINT, random number in [2, 2^16]
+    int split_size = rand() % 5 + 1;  // NOLINT, random in [1, 5]
     std::vector<int> split =
         multi_level_tiling.SampleTileSplit<int>(number_to_split, split_size);
     EXPECT_EQ(split.size(), static_cast<size_t>(split_size));
@@ -190,7 +190,7 @@ void CheckResult(raw_func_type test_func,
     input_data_ptrs[i] =
         reinterpret_cast<float*>(malloc(input_data_numel * sizeof(float)));
     for (int j = 0; j < input_data_numel; ++j) {
-      input_data_ptrs[i][j] = (rand() * 1.f) / RAND_MAX;
+      input_data_ptrs[i][j] = (rand() * 1.f) / RAND_MAX;  // NOLINT
     }
   }
   std::vector<float*> test_output_data_ptrs(output_names.size());
2 changes: 1 addition & 1 deletion paddle/cinn/backends/codegen_cuda_dev.cc
@@ -29,7 +29,7 @@
 namespace cinn {
 namespace backends {

-const std::string CodeGenCUDA_Dev::source_header_ =
+const std::string CodeGenCUDA_Dev::source_header_ =  // NOLINT
     R"(#include <cstdint>

 #define CINN_WITH_CUDA
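The NOLINT here addresses a different cpplint check: runtime/string flags global or static objects of class type such as this std::string source header, because they require dynamic initialization. A hedged sketch of the warning-free alternative, assuming the header only ever needs read access (the constant name below is illustrative):

#include <string_view>

// A string_view over a raw string literal has no constructor or destructor to
// run at startup, so cpplint's runtime/string check stays quiet.
constexpr std::string_view kCudaSourceHeader = R"(#include <cstdint>

#define CINN_WITH_CUDA
)";
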
15 changes: 8 additions & 7 deletions paddle/cinn/frontend/computation_test.cc
@@ -88,8 +88,8 @@ TEST(cinn_computation, basic_cpu) {
   std::vector<float> hostD(M * N);
   std::vector<float> hostD_expected(M * N);
   for (int i = 0; i < M * N; i++) {
-    hostA[i] = static_cast<float>(rand()) / INT_MAX;
-    hostB[i] = static_cast<float>(rand()) / INT_MAX;
+    hostA[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
+    hostB[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
     hostD_expected[i] = hostA[i] * 2 + hostB[i];
   }

@@ -126,8 +126,8 @@ TEST(cinn_computation, basic_gpu) {
   std::vector<float> hostD(M * N);
   std::vector<float> hostD_expected(M * N);
   for (int i = 0; i < M * N; i++) {
-    hostA[i] = static_cast<float>(rand()) / INT_MAX;
-    hostB[i] = static_cast<float>(rand()) / INT_MAX;
+    hostA[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
+    hostB[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
     hostD_expected[i] = hostA[i] * 2 + hostB[i];
   }

@@ -165,7 +165,7 @@ TEST(cinn_computation, net_builder_cpu) {
   auto load_input = [=](hlir::framework::Tensor t) {
     float *ptr = t->mutable_data<float>(target);
     for (int i = 0; i < t->shape().numel(); i++) {
-      ptr[i] = static_cast<float>(rand()) / INT_MAX;
+      ptr[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
     }
   };

@@ -232,7 +232,8 @@ TEST(cinn_computation, fc_execute_cpu) {
   auto A = inputs[0];
   ASSERT_EQ(A->shape().numel(), 1 * 30);
   float *ptrA = A->mutable_data<float>(target);
-  for (int i = 0; i < 30; i++) ptrA[i] = static_cast<float>(rand()) / INT_MAX;
+  for (int i = 0; i < 30; i++)
+    ptrA[i] = static_cast<float>(rand()) / INT_MAX;  // NOLINT
   for (int i = 0; i < 30; i++) ptrA[i] = static_cast<float>(0);
   compute->Execute();
 }
@@ -253,7 +254,7 @@ TEST(cinn_computation, fc_execute_gpu) {
   auto out = outputs[0];

   std::vector<float> hostA(30);
-  for (float &v : hostA) v = static_cast<float>(rand()) / INT_MAX;
+  for (float &v : hostA) v = static_cast<float>(rand()) / INT_MAX;  // NOLINT
   compute->SetTensorData(
       A, reinterpret_cast<void *>(hostA.data()), hostA.size() * sizeof(float));
8 changes: 4 additions & 4 deletions paddle/cinn/hlir/op/contrib/argmax.cc
@@ -163,10 +163,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmax(
   ir_sch.SetBuffer(blocks[0], "local");
   ir_sch.SetBuffer(blocks[1], "local");

-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1 && target.arch == Target::Arch::X86) {
     pe::IRScheduleInjectiveCPU(ir_sch, output_shapes.front(), target, true);
   }
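From here down, the platform-dependent long is replaced by the fixed-width int64_t to satisfy cpplint's runtime/int check; the logic is untouched. One detail worth noting, shown in the sketch below (shape values are illustrative, not from the PR): std::accumulate works in the type of its init argument, so with the plain int literal 1 the product is still computed in int and only the final assignment widens; passing an int64_t init would make the whole reduction 64-bit.

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

std::vector<int> shape = {16, 32, 64};
// Accumulates in int64_t because the init argument is int64_t{1}.
int64_t prod_size = std::accumulate(
    shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
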
8 changes: 4 additions & 4 deletions paddle/cinn/hlir/op/contrib/argmin.cc
@@ -160,10 +160,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgmin(
   // exceed the limit.
   ir_sch.SetBuffer(blocks[0], "local");
   ir_sch.SetBuffer(blocks[1], "local");
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1 && target.arch == Target::Arch::X86) {
     pe::IRScheduleInjectiveCPU(ir_sch, output_shapes.front(), target, true);
   }
8 changes: 4 additions & 4 deletions paddle/cinn/hlir/op/contrib/gather_nd.cc
@@ -146,10 +146,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForGatherNd(
   ir::ModuleExpr mod_expr(vec_ast);
   ir::IRSchedule ir_sch(mod_expr);
   ir_sch.MergeExprs();
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1) {
     if (target.arch == Target::Arch::NVGPU) {
       pe::IRCudaScheduleInjective(ir_sch, output_shapes.front(), target);
8 changes: 4 additions & 4 deletions paddle/cinn/hlir/op/contrib/repeat.cc (file mode 100755 → 100644)
@@ -201,10 +201,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForRepeat(
   ir::ModuleExpr mod_expr(vec_ast);
   ir::IRSchedule ir_sch(mod_expr);
   ir_sch.MergeExprs();
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1) {
     if (target.arch == Target::Arch::NVGPU) {
       pe::IRCudaScheduleInjective(ir_sch, output_shapes.front(), target);
8 changes: 4 additions & 4 deletions paddle/cinn/hlir/op/contrib/resize.cc
@@ -240,10 +240,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForResize(
   ir::ModuleExpr mod_expr(vec_ast);
   ir::IRSchedule ir_sch(mod_expr);
   ir_sch.MergeExprs();
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1) {
     if (target.arch == Target::Arch::NVGPU) {
       pe::IRCudaScheduleInjective(ir_sch, output_shapes.front(), target);
16 changes: 8 additions & 8 deletions paddle/cinn/hlir/op/contrib/sort.cc
@@ -218,10 +218,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForSort(
   ir_sch.SetBuffer(blocks[0], "local");
   ir_sch.SetBuffer(blocks[1], "local");

-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1 && target.arch == Target::Arch::X86) {
     pe::IRScheduleInjectiveCPU(ir_sch, output_shapes.front(), target, true);
   }
@@ -311,10 +311,10 @@ std::shared_ptr<framework::OpStrategy> StrategyForArgSort(
   // the size will exceed the limit.
   // TODO: There is a bug, setting buffer to "local" here will cause the var
   // declared twice at CodeGen. ir_sch.SetBuffer(blocks[0], "local");
-  long prod_size = std::accumulate(output_shapes[0].begin(),
-                                   output_shapes[0].end(),
-                                   1,
-                                   std::multiplies<int>());
+  int64_t prod_size = std::accumulate(output_shapes[0].begin(),
+                                      output_shapes[0].end(),
+                                      1,
+                                      std::multiplies<int>());
   if (prod_size > 1 && target.arch == Target::Arch::X86) {
     pe::IRScheduleInjectiveCPU(ir_sch, output_shapes.front(), target, true);
   }
6 changes: 3 additions & 3 deletions paddle/cinn/runtime/cuda/cublas_util.h
@@ -124,14 +124,14 @@ inline cublasStatus_t cublasGemmStridedBatched(cudaDataType_t dtype,
     float alpha,
     const void *A,
     int lda,
-    long long int strideA,
+    int64_t strideA,
     const void *B,
     int ldb,
-    long long int strideB,
+    int64_t strideB,
     float beta,
     void *C,
     int ldc,
-    long long int strideC,
+    int64_t strideC,
     int batchCount) {
   if (dtype == CUDA_R_32F) {
     return cublasSgemmStridedBatched(handle,
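The stride parameters of the cublasGemmStridedBatched wrapper switch from long long int to int64_t for the same runtime/int reason. The underlying cuBLAS call (cublasSgemmStridedBatched in the visible branch) still declares its strides as long long int; on the LP64 platforms CINN builds for, both types are 64-bit, so the arguments convert implicitly and losslessly. A small sketch of stride computation under that assumption (dimensions are illustrative):

#include <cstdint>

const int M = 64, N = 64, K = 64;
// Computed in int64_t so large batched tensors do not overflow int; the
// values convert implicitly to long long int at the cuBLAS call site.
int64_t strideA = static_cast<int64_t>(M) * K;
int64_t strideB = static_cast<int64_t>(K) * N;
int64_t strideC = static_cast<int64_t>(M) * N;
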
6 changes: 3 additions & 3 deletions paddle/cinn/runtime/cuda/cuda_util.cc
@@ -1979,13 +1979,13 @@ class CurandGenerator {

   curandGenerator_t &GetGenerator() { return generator_; }

-  CurandGenerator &SetOffset(unsigned long long offset = 0ULL) {
+  CurandGenerator &SetOffset(uint64_t offset = 0ULL) {
     CURAND_CALL(curandSetGeneratorOffset(generator_, offset));
     VLOG(4) << "Set curand generator offset to: " << offset;
     return *this;
   }

-  CurandGenerator &SetSeed(unsigned long long seed = 0ULL) {
+  CurandGenerator &SetSeed(uint64_t seed = 0ULL) {
     // set global seed if seed is zero
     auto rand_seed = (seed == 0ULL) ? RandomSeed::GetOrSet() : seed;
     if (rand_seed != 0ULL && rand_seed != seed_) {
@@ -2009,7 +2009,7 @@ class CurandGenerator {

  private:
   curandGenerator_t generator_;
-  unsigned long long seed_ = 0ULL;
+  uint64_t seed_ = 0ULL;
   cudaStream_t stream_ = nullptr;
 };

6 changes: 3 additions & 3 deletions paddle/cinn/runtime/flags.cc
@@ -196,16 +196,16 @@ bool GetCinnCudnnDeterministic() {
 #endif
 }

-unsigned long long RandomSeed::seed_ = 0ULL;
+uint64_t RandomSeed::seed_ = 0ULL;

-unsigned long long RandomSeed::GetOrSet(unsigned long long seed) {
+uint64_t RandomSeed::GetOrSet(uint64_t seed) {
   if (seed != 0ULL) {
     seed_ = seed;
   }
   return seed_;
 }

-unsigned long long RandomSeed::Clear() {
+uint64_t RandomSeed::Clear() {
   auto old_seed = seed_;
   seed_ = 0ULL;
   return old_seed;
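unsigned long long becomes uint64_t in both the cuRAND wrapper above and the RandomSeed registry here; defaults and behavior are unchanged, and the values still convert cleanly where cuRAND expects unsigned long long (for example curandSetGeneratorOffset). A usage sketch of the registry, with the cinn::runtime namespace assumed from the file path:

#include <cstdint>

#include "paddle/cinn/runtime/flags.h"

// Semantics visible in the hunk: a non-zero seed is stored, zero reads back
// the current global seed, Clear() returns the old seed and resets it to zero.
uint64_t stored = cinn::runtime::RandomSeed::GetOrSet(1234ULL);  // stores 1234
uint64_t readback = cinn::runtime::RandomSeed::GetOrSet();       // returns 1234
uint64_t previous = cinn::runtime::RandomSeed::Clear();          // returns 1234, seed is 0 again
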
6 changes: 3 additions & 3 deletions paddle/cinn/runtime/flags.h
@@ -31,15 +31,15 @@ bool CanUseNvccCompiler();

 class RandomSeed {
  public:
-  static unsigned long long GetOrSet(unsigned long long seed = 0);
-  static unsigned long long Clear();
+  static uint64_t GetOrSet(uint64_t seed = 0);
+  static uint64_t Clear();

  private:
   RandomSeed() = default;
   RandomSeed(const RandomSeed &) = delete;
   RandomSeed &operator=(const RandomSeed &) = delete;

-  static unsigned long long seed_;
+  static uint64_t seed_;
 };

 bool IsCompiledWithCUDA();
10 changes: 5 additions & 5 deletions test/cpp/cinn/benchmark/test_all_ops_default.cc
@@ -261,16 +261,16 @@ TEST_DEFAULT1(depthwise_conv2d,

 // layout_transform
 std::vector<std::vector<int>> shapes_layout_transform = {{512, 512, 3, 3}};
-std::string src_layout = "OIHW";
-std::string dst_layout = "OIHW16i16o";
+std::string src_layout = "OIHW";        // NOLINT
+std::string dst_layout = "OIHW16i16o";  // NOLINT
 absl::flat_hash_map<std::string, AttrType> attr_store_layout_transform = {
     {"src_layout", src_layout}, {"dst_layout", dst_layout}};
 TEST_DEFAULT1(
     layout_transform, layout_transform, type, type, attr_store_layout_transform)

 std::vector<std::vector<int>> shapes_layout_transform1 = {{64, 3, 7, 7}};
-std::string src_layout1 = "OIHW";
-std::string dst_layout1 = "OIHW3i32o";
+std::string src_layout1 = "OIHW";       // NOLINT
+std::string dst_layout1 = "OIHW3i32o";  // NOLINT
 absl::flat_hash_map<std::string, AttrType> attr_store_layout_transform1 = {
     {"src_layout", src_layout1}, {"dst_layout", dst_layout1}};
 TEST_DEFAULT1(layout_transform,
@@ -284,7 +284,7 @@ hlir::framework::NodeAttr attrs;
 std::vector<int> kernel_size = {3, 3};
 std::vector<int> stride_size = {2, 2};
 std::vector<int> padding_size = {1, 1, 1, 1};
-std::string pool_type = "max";
+std::string pool_type = "max";  // NOLINT
 absl::flat_hash_map<std::string, AttrType> attr_store_pool2d = {
     {"kernel_size", kernel_size},
     {"stride_size", stride_size},
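These benchmark globals trip the same runtime/string check as the CUDA source header above; NOLINT keeps them as std::string, presumably because the attribute store holds std::string values. If the consuming code could take plain character constants, a warning-free variant might look like the following (hypothetical names, not verified against the attr-store API):

// Character-array constants have no dynamic initialization, so cpplint's
// runtime/string check does not fire; usable only if the attr store accepts
// const char* or can construct std::string at the use site.
constexpr char kSrcLayout[] = "OIHW";
constexpr char kDstLayout[] = "OIHW16i16o";
constexpr char kPoolType[] = "max";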