From 64e8555c454a60e79904be917ef9cf0793f1acd3 Mon Sep 17 00:00:00 2001 From: Dick Carter Date: Fri, 28 May 2021 21:56:14 -0700 Subject: [PATCH] Mark cuDNN Dropout as fully CUDA Graphs compatible. Reenable tests. --- src/operator/nn/dropout.cu | 16 ++++++++++++---- tests/python/gpu/test_gluon_gpu.py | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/operator/nn/dropout.cu b/src/operator/nn/dropout.cu index bff9b020126c..5e8f55881af6 100644 --- a/src/operator/nn/dropout.cu +++ b/src/operator/nn/dropout.cu @@ -30,10 +30,18 @@ namespace op { NNVM_REGISTER_OP(Dropout) .set_attr<FIsCUDAGraphsCompatible>("FIsCUDAGraphsCompatible", - [](const NodeAttrs&, const bool is_train) { - // Dropout is just passthrough during inference - return !is_train; - }) + [](const NodeAttrs& attrs, const bool is_train) { + // Dropout is just passthrough during inference for all impls + if (!is_train) + return true; + + // cuDNN impl is compatible during training as well + const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed); + real_t pkeep = 1.0f - param.p; + bool cudnn_off = param.cudnn_off && param.cudnn_off.value(); + bool cudnn_available = pkeep > 0 && !cudnn_off; + return MXNET_USE_CUDNN_DROPOUT && cudnn_available; + }) .set_attr<FStatefulCompute>("FStatefulCompute<gpu>", DropoutCompute<gpu>); NNVM_REGISTER_OP(_backward_Dropout) diff --git a/tests/python/gpu/test_gluon_gpu.py b/tests/python/gpu/test_gluon_gpu.py index f58322d101bb..af6805c9cd5c 100644 --- a/tests/python/gpu/test_gluon_gpu.py +++ b/tests/python/gpu/test_gluon_gpu.py @@ -633,7 +633,7 @@ def generate_inputs(self): TestDesc('ConvTranspose', lambda: mx.gluon.nn.Conv2DTranspose(channels=32, kernel_size=(1,1))), TestDesc('Dense', lambda: mx.gluon.nn.Dense(units=128)), TestDesc('Activation', lambda: mx.gluon.nn.Activation('tanh')), - #TestDesc('Dropout', lambda: mx.gluon.nn.Dropout(0.5)), + TestDesc('Dropout', lambda: mx.gluon.nn.Dropout(0.5)), TestDesc('Flatten', lambda: mx.gluon.nn.Flatten()), TestDesc('MaxPool', lambda: mx.gluon.nn.MaxPool2D()), 
TestDesc('AvgPool', lambda: mx.gluon.nn.AvgPool2D()),