From dbcce28049aeb318cfd76bff83d1e508d7fa9486 Mon Sep 17 00:00:00 2001
From: sxjscience
Date: Mon, 20 May 2019 17:06:43 +0800
Subject: [PATCH] fix

---
 src/operator/nn/layer_norm.cu | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/operator/nn/layer_norm.cu b/src/operator/nn/layer_norm.cu
index 0d7bad530311..db09969d6fcb 100644
--- a/src/operator/nn/layer_norm.cu
+++ b/src/operator/nn/layer_norm.cu
@@ -311,7 +311,7 @@ void LayerNormGPUContig(const LayerNormParam param,
   }
   cudaStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>());
   const dim3 dimBlock(32, nthread_y);
-  MXNET_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
+  MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
     typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
     int nshared = nthread_y > 1 ? nthread_y * 32 * sizeof(AType)
                                   + (nthread_y / 2) * 32 * sizeof(int) : 0;
@@ -636,7 +636,7 @@ void LayerNormGradGPUContig(const LayerNormParam param,
   GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim,
                                &gb_block_dim, &gb_grid_dim, &npart);
   if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) {
-    MXNET_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
+    MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
      typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
      Tensor<gpu, 1, AType> workspace =
        ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s);
@@ -695,7 +695,7 @@ void LayerNormGradGPUContig(const LayerNormParam param,
   const dim3 data_block_dim(32, nthread_y);
   const int LOAD_UNROLL = 4;
   if (data_grad_req != kNullOp) {
-    MXNET_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
+    MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
      typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
      int nshared = data_block_dim.y > 1 ? data_block_dim.y * data_block_dim.x * sizeof(AType) : 0;
      CheckLaunchParam(data_grid_dim, data_block_dim);