This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit ffbeab3

Fix warnings

larroy committed May 17, 2019
1 parent c14bf0a

Showing 7 changed files with 62 additions and 4 deletions.
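Nearly every hunk in this commit wraps a declaration of omp_threads in GCC diagnostic pragmas. The variable is read only by a following #pragma omp parallel for num_threads(omp_threads); in a build without OpenMP that pragma is ignored, the variable is never used, and GCC emits -Wunused-variable. A minimal self-contained sketch of the pattern (get_thread_count is a hypothetical stand-in for mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount()):

#include <cstdio>

static int get_thread_count() { return 4; }  // hypothetical stand-in

int main() {
  double sum = 0.0;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
  // Read only by the OpenMP pragma below; without -fopenmp that pragma
  // is ignored and this declaration would otherwise trigger the warning.
  const int omp_threads = get_thread_count();
#pragma GCC diagnostic pop
#pragma omp parallel for num_threads(omp_threads) reduction(+ : sum)
  for (int i = 0; i < 1000; ++i) {
    sum += 0.5 * i;
  }
  std::printf("sum = %f\n", sum);
  return 0;
}

GCC attributes -Wunused-variable to the declaration site, so pushing and popping tightly around the declaration, as the hunks below do, is sufficient.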
src/operator/contrib/multibox_detection.cc (3 additions, 1 deletion)
@@ -96,8 +96,10 @@ inline void MultiBoxDetectionForward(const Tensor<cpu, 3, DType> &out,
 const int num_anchors = cls_prob.size(2);
 const int num_batches = cls_prob.size(0);
 const DType *p_anchor = anchors.dptr_;
-
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 std::vector<DType> outputs;
 outputs.reserve(num_anchors * 6);
 for (int nbatch = 0; nbatch < num_batches; ++nbatch) {
src/operator/contrib/psroi_pooling.cc (3 additions)
@@ -51,7 +51,10 @@ template <typename DType>
 const int output_dim,
 const int group_size,
 DType* top_data) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 #pragma omp parallel for num_threads(omp_threads)
 for (int index = 0; index < count; index++) {
 // The output is in order (n, ctop, ph, pw)
src/operator/l2_normalization.cc (3 additions)
@@ -49,7 +49,10 @@ class L2NormalizationOpCPU : public L2NormalizationOp<cpu, DType> {
 CHECK_EQ(out_data.size(), 2U);
 Stream<cpu> *s = ctx.get_stream<cpu>();
 mxnet::TShape orig_shape = in_data[l2_normalization::kData].shape_;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 auto omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (this->param_.mode == l2_normalization::kInstance) {
 Shape<2> dshape = Shape2(orig_shape[0],
 orig_shape.ProdShape(1, orig_shape.ndim()));
src/operator/nn/dropout-inl.h (1 addition, 1 deletion)
@@ -492,7 +492,7 @@ class DropoutOp {
 #endif // MXNET_USE_CUDNN_DROPOUT
 }; // class DropoutOp

-static OpStatePtr CreateDropoutState(const nnvm::NodeAttrs &attrs,
+inline OpStatePtr CreateDropoutState(const nnvm::NodeAttrs &attrs,
 const Context ctx,
 const mxnet::ShapeVector &in_shapes,
 const std::vector<int> &in_types) {
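On the static to inline change above: a function defined static in a header gives every including translation unit its own internal-linkage copy, and any unit that never calls it can trigger -Wunused-function; declaring it inline keeps a single logical definition shared across units, so no copy is left unused. A toy illustration (hypothetical header, not the MXNet source):

// toy_header.h
#ifndef TOY_HEADER_H
#define TOY_HEADER_H

// 'static' here would give each including .cc file a private copy and
// can warn (-Wunused-function) in files that never call it; 'inline'
// lets all files share one definition under the one-definition rule.
inline int answer() { return 42; }

#endif  // TOY_HEADER_H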
src/operator/rnn_impl.h (48 additions, 2 deletions)
@@ -90,8 +90,10 @@ void LstmForwardTrainingSingleLayer(DType* ws,
 const DType beta = 0.0;
 const int cell_size = N * H;
 linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
-
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 for (int i = 0; i < T; ++i) {
 int t = bid ? T - 1 - i : i;
 linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
@@ -151,7 +153,10 @@ void LstmForwardTraining(DType* ws,
 const int cell_size = N * H;
 unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
 int idx = 0; // state & cell state's idx;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 for (int i = 0; i < L; ++i) {
 const int input_size = i ? H * D : I;
 const int w_size = (input_size + H) * H * 4;
@@ -233,8 +238,10 @@ void LstmForwardInferenceSingleLayer(DType* ws,
 const DType beta = 0.0;
 const int cell_size = N * H;
 linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
-
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 for (int i = 0; i < T; ++i) {
 int t = bid ? T - 1 - i : i;
 linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
@@ -360,7 +367,10 @@ void LstmBackwardSingleLayer(DType* ws,
 DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
 const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
 const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (req_params != kNullOp && req_params != kAddTo) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < H * 4 * H; ++i) {
@@ -570,7 +580,10 @@ void LstmBackward(DType* ws,
 }
 if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
 dropout_random = dropout_random - T * N * D * H;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 #pragma omp parallel for num_threads(omp_threads)
 for (int j = 0; j < T * N * D * H; j++) {
 if (dropout_random[j] == 0) {
@@ -625,7 +638,10 @@ void GruForwardInferenceSingleLayer(DType* ws,
 const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
 const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
 const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (D == 1) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < N; i++)
@@ -837,7 +853,10 @@ void GruForwardTrainingSingleLayer(DType* ws,
 const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
 const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
 const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (D == 1) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < N; i++)
@@ -1001,7 +1020,10 @@ void GruForwardTraining(DType* ws,
 y_l = y_l + T * N * H * D;
 }
 if (dropout > 0.0f && l > 0) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < T * N * I; i++) {
 int rand_data = rand_r(&seed_);
@@ -1033,7 +1055,10 @@
 }
 wh_l = wx_l + I * 3 * H;
 }
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < T * N * H * D; ++i) {
 y_ptr[i] = y_l[i];
@@ -1099,7 +1124,10 @@ void GruBackwardSingleLayer(DType* ws,
 const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
 const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
 const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (req_params != kNullOp && req_params != kAddTo) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < D * H * 3 * H; ++i) {
@@ -1444,7 +1472,10 @@ void GruBackward(DType* ws,
 Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
 int inputsize = I;
 DType* y_tmp = y_l - T * N * H * D;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 for (int l = L - 1; l >= 0; --l) {
 if (l == 0) {
 I = inputsize;
@@ -1539,7 +1570,10 @@ void VanillaRNNForwardInferenceSingleLayer(DType* ws,
 const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
 const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
 const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (D == 1) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < N; i++)
@@ -1740,7 +1774,10 @@ void VanillaRNNForwardTrainingSingleLayer(DType* ws,
 const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
 const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
 const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (D == 1) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < N; i++)
@@ -1880,7 +1917,10 @@ void VanillaRNNForwardTraining(DType* ws,
 DType* bx_l = bx;
 DType* bh_l = bh;
 DType* y_tmp = x_ptr;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
 for (int l = 0; l < L; l++) {
 if (l != 0) {
@@ -1972,7 +2012,10 @@ void VanillaRNNBackwardSingleLayer(DType* ws,
 const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
 const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
 const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 if (req_params != kNullOp && req_params != kAddTo) {
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < D * H * H; ++i) {
@@ -2299,7 +2342,10 @@ void VanillaRNNBackward(DType* ws,
 Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
 int inputsize = I;
 DType* y_tmp = y_l - T * N * H * D;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
+#pragma GCC diagnostic pop
 for (int l = L - 1; l >= 0; --l) {
 if (l == 0) {
 I = inputsize;
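For comparison, a common alternative to repeating the pragma triple at every declaration (hypothetical, not what this commit does) is to reference the variable explicitly, which also silences -Wunused-variable in OpenMP-less builds:

static int get_thread_count() { return 4; }  // stand-in, as above

void scale(float* data, int n) {
  const int omp_threads = get_thread_count();
  (void)omp_threads;  // counts as a use; no diagnostic pragmas needed
#pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < n; ++i) {
    data[i] *= 2.0f;
  }
}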
src/operator/tensor/ordering_op-inl.h (3 additions)
@@ -202,7 +202,10 @@ MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
 const bool full_sort(K*8 > N);
 // Batch size.
 const int M(work.size(0)/(sizeof(DType)*N));
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
 const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
+#pragma GCC diagnostic pop
 #pragma omp parallel for num_threads(omp_threads)
 for (int i = 0; i < M; ++i) {
 // Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
tests/cpp/misc/libinfo_test.cc (1 addition)
@@ -30,4 +30,5 @@ using namespace mxnet::features;
 TEST(RuntimeTest, RuntimeTestAll) {
 EXPECT_EQ(EnumNames::names.size(), MAX_FEATURES);
 const auto& features = LibInfo::getInstance()->getFeatures();
+EXPECT_GE(features.size(), 0);
 }
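The added EXPECT_GE is trivially satisfied, since an unsigned size is always >= 0; its practical effect is presumably to use the features local so it no longer triggers -Wunused-variable.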
