diff --git a/src/operator/image/image_random-inl.h b/src/operator/image/image_random-inl.h
index 87c85b94b7a4..0f4d173be79a 100644
--- a/src/operator/image/image_random-inl.h
+++ b/src/operator/image/image_random-inl.h
@@ -334,7 +334,6 @@ void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
   CHECK_EQ(req.size(), 1U);
 
   const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
-  int N, C, H, W;
 
   // Mean and Std can be 1 or 3D only.
   std::vector<float> mean(3);
@@ -360,27 +359,28 @@ void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
   mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
   MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
     MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
+      int N, C, H, W;
+      DType *input = nullptr;
+      DType *output = nullptr;
       if (inputs[0].ndim() == 3) {
         N = 1;
         C = static_cast<int>(inputs[0].shape_[0]);
         H = static_cast<int>(inputs[0].shape_[1]);
         W = static_cast<int>(inputs[0].shape_[2]);
-        Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
-        Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s);
-        NormalizeImplCUDA<DType>(s, input.dptr_, output.dptr_, req_type,
-                                 N, C, H, W, mean[0], mean[1], mean[2],
-                                 std[0], std[1], std[2]);
+        input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
+        output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
       } else {
         N = static_cast<int>(inputs[0].shape_[0]);
         C = static_cast<int>(inputs[0].shape_[1]);
         H = static_cast<int>(inputs[0].shape_[2]);
         W = static_cast<int>(inputs[0].shape_[3]);
-        Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
-        Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s);
-        NormalizeImplCUDA<DType>(s, input.dptr_, output.dptr_, req_type,
-                                 N, C, H, W, mean[0], mean[1], mean[2],
-                                 std[0], std[1], std[2]);
+        input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
+        output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
       }
+      NormalizeImplCUDA<DType>(s, input, output, req_type,
+                               N, C, H, W,
+                               mean[0], mean[1], mean[2],
+                               std[0], std[1], std[2]);
     });
   });
 #else
@@ -466,7 +466,6 @@ void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
     std[1] = param.std[1];
     std[2] = param.std[2];
   }
-  int N, C, H, W;
 
   // Note: inputs[0] is out_grad
   const TBlob& in_data = inputs[1];
@@ -476,27 +475,27 @@ void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
   mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
   MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
     MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
+      int N, C, H, W;
+      DType *in_grad = nullptr;
+      DType *out_grad = nullptr;
       if (in_data.ndim() == 3) {
         N = 1;
         C = static_cast<int>(in_data.shape_[0]);
         H = static_cast<int>(in_data.shape_[1]);
         W = static_cast<int>(in_data.shape_[2]);
-        Tensor<gpu, 3, DType> out_grad = inputs[0].get<gpu, 3, DType>(s);
-        Tensor<gpu, 3, DType> in_grad = outputs[0].get<gpu, 3, DType>(s);
-        NormalizeBackwardImplCUDA<DType>(s, out_grad.dptr_, in_grad.dptr_,
-                                         req_type, N, C, H, W, std[0], std[1],
-                                         std[2]);
+        out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
+        in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
       } else {
         N = static_cast<int>(in_data.shape_[0]);
         C = static_cast<int>(in_data.shape_[1]);
         H = static_cast<int>(in_data.shape_[2]);
         W = static_cast<int>(in_data.shape_[3]);
-        Tensor<gpu, 4, DType> out_grad = inputs[0].get<gpu, 4, DType>(s);
-        Tensor<gpu, 4, DType> in_grad = outputs[0].get<gpu, 4, DType>(s);
-        NormalizeBackwardImplCUDA<DType>(s, out_grad.dptr_, in_grad.dptr_,
-                                         req_type, N, C, H, W, std[0], std[1],
-                                         std[2]);
+        out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
+        in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
       }
+      NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
+                                       N, C, H, W,
+                                       std[0], std[1], std[2]);
    });
  });
 #else
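
For context, a minimal, self-contained C++ sketch of the deduplication pattern this diff applies: instead of repeating the kernel launch in both branches of an if/else that differ only in how the shape and raw pointer are obtained, bind those in the branches and launch once afterwards. Everything below (normalize, Image3D, Image4D) is a hypothetical stand-in, not MXNet's API.

#include <cstddef>
#include <iostream>
#include <vector>

struct Image3D { std::vector<float> data; int c, h, w; };    // CHW layout
struct Image4D { std::vector<float> data; int n, c, h, w; }; // NCHW layout

// Stand-in for the CUDA kernel: x = (x - mean[c]) / std[c], per channel.
void normalize(float *data, int n, int c, int h, int w,
               const float *mean, const float *stddev) {
  const std::size_t plane = static_cast<std::size_t>(h) * w;
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < c; ++j) {
      float *p = data + (static_cast<std::size_t>(i) * c + j) * plane;
      for (std::size_t k = 0; k < plane; ++k)
        p[k] = (p[k] - mean[j]) / stddev[j];
    }
}

int main() {
  const float mean[3] = {0.5f, 0.5f, 0.5f};
  const float stddev[3] = {0.25f, 0.25f, 0.25f};
  Image3D img3{std::vector<float>(3 * 2 * 2, 1.0f), 3, 2, 2};
  Image4D img4{{}, 0, 0, 0, 0};
  bool is_3d = true;

  // Bind shape and pointer per branch; launch once after the if/else,
  // mirroring the refactor in the diff above.
  int N, C, H, W;
  float *data = nullptr;
  if (is_3d) {
    N = 1; C = img3.c; H = img3.h; W = img3.w;
    data = img3.data.data();
  } else {
    N = img4.n; C = img4.c; H = img4.h; W = img4.w;
    data = img4.data.data();
  }
  normalize(data, N, C, H, W, mean, stddev);
  std::cout << "first value: " << data[0] << "\n";  // (1 - 0.5) / 0.25 = 2
}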