From c01d5f7a39f9b1ed06281cff12f8793dd275422a Mon Sep 17 00:00:00 2001
From: Per Goncalves da Silva
Date: Wed, 27 Mar 2019 13:22:42 +0100
Subject: [PATCH] Disables failing tests due to cuDNN

---
 tests/python/gpu/test_gluon_gpu.py      | 1 +
 tests/python/unittest/test_gluon_rnn.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/tests/python/gpu/test_gluon_gpu.py b/tests/python/gpu/test_gluon_gpu.py
index 9eeeec749211..fcffeedc036a 100644
--- a/tests/python/gpu/test_gluon_gpu.py
+++ b/tests/python/gpu/test_gluon_gpu.py
@@ -227,6 +227,7 @@ def test_rnn_layer_begin_state_type():
         modeling_layer(fake_data)
 
 
+@unittest.skip("test fails due to cuDNN arch mismatch; temporarily disabled until it is fixed. See /~https://github.com/apache/incubator-mxnet/issues/14502")
 def test_gluon_ctc_consistency():
     loss = mx.gluon.loss.CTCLoss()
     data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
diff --git a/tests/python/unittest/test_gluon_rnn.py b/tests/python/unittest/test_gluon_rnn.py
index b410362c8fd1..266cebb5c61e 100644
--- a/tests/python/unittest/test_gluon_rnn.py
+++ b/tests/python/unittest/test_gluon_rnn.py
@@ -541,6 +541,7 @@ def test_rnn_layers_fp32():
 
 
 @assert_raises_cudnn_not_satisfied(min_version='5.1.10')
 @unittest.skipIf(mx.context.num_gpus() == 0, "RNN FP16 only implemented for GPU for now")
+@unittest.skip("test fails due to cuDNN arch mismatch; temporarily disabled until it is fixed. See /~https://github.com/apache/incubator-mxnet/issues/14502")
 def test_rnn_layers_fp16():
     run_rnn_layers('float16', 'float32', mx.gpu())
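
Note (not part of the patch above): the skips added here are unconditional, so the tests stay disabled even on CPU-only runs where the cuDNN arch mismatch cannot occur. A minimal sketch of a conditional alternative is shown below, assuming the mismatch only manifests when a GPU is present; the helper name skip_for_cudnn_mismatch is hypothetical and not an MXNet API.

    # Sketch: skip only when a GPU is available, so CPU-only CI still runs the test.
    # Assumption: the cuDNN arch mismatch in issue #14502 only occurs on GPU runs.
    import unittest

    import mxnet as mx

    skip_for_cudnn_mismatch = unittest.skipIf(
        mx.context.num_gpus() > 0,
        "cuDNN arch mismatch; see /~https://github.com/apache/incubator-mxnet/issues/14502",
    )


    @skip_for_cudnn_mismatch
    def test_gluon_ctc_consistency():
        # Placeholder body; the real test lives in tests/python/gpu/test_gluon_gpu.py.
        loss = mx.gluon.loss.CTCLoss()
        assert loss is not None

The same guard could be stacked under the existing @assert_raises_cudnn_not_satisfied and @unittest.skipIf decorators in test_gluon_rnn.py, keeping the skip reason and issue link in one place instead of duplicating the string in both files.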