From cf527e034434176180c92a12988459430ab3ed8f Mon Sep 17 00:00:00 2001
From: "Li, Hao H"
Date: Mon, 10 Dec 2018 16:11:30 +0800
Subject: [PATCH] check if MKL in BUILD_TAG in test case

Only skip the CPU quantized_fc test when the build was NOT made with MKL
BLAS (detected via the MKL substring in the BUILD_TAG environment
variable); MKL builds do support s8u8s32 on CPU. Also drop the redundant
"WARNING:" prefix from an INFO-level log line in the quantized FC op.

NOTE(review): the author email appears to have been stripped from the
From: header during transport; restore it before applying with git am.
NOTE(review): the test change assumes "import os" already exists in
test_quantization.py -- verify against the target file.
---
 .../quantization/quantized_fully_connected.cc  |  2 +-
 tests/python/quantization/test_quantization.py | 11 +++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/operator/quantization/quantized_fully_connected.cc b/src/operator/quantization/quantized_fully_connected.cc
index 2a3b430f677b..3dab4b403757 100644
--- a/src/operator/quantization/quantized_fully_connected.cc
+++ b/src/operator/quantization/quantized_fully_connected.cc
@@ -127,7 +127,7 @@ struct QuantizedSumInitKernelWithBias {
       out[i] = bias[i] * float_for_one_bias_quant /
           float_for_one_out_quant;
     } else {
-      LOG(INFO) << "WARNING: float_for_one_out_quant is 0, need to check min/max data !";
+      LOG(INFO) << "float_for_one_out_quant is 0, need to check min/max data !";
       out[i] = 0;
     }
   }
diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py
index ed2bb3ad4410..1f35251902f6 100644
--- a/tests/python/quantization/test_quantization.py
+++ b/tests/python/quantization/test_quantization.py
@@ -270,8 +270,15 @@ def check_quantized_pooling(data_shape, kernel, pool_type, pad, stride, global_p
 def test_quantized_fc():
     def check_quantized_fc(data_shape, num_hidden, no_bias, qdtype, flatten=True):
         if mx.current_context().device_type != 'gpu':
-            print('skipped testing quantized_fc on cpu since s8u8s32 is only supported by MKL BLAS library')
-            return
+            hasMKL = False
+            for key in os.environ.keys():
+                if key == "BUILD_TAG":
+                    if os.environ['BUILD_TAG'].find("MKL") != -1:
+                        hasMKL = True
+                    break
+            if not hasMKL:
+                print('skipped testing quantized_fc on cpu since s8u8s32 is only supported by MKL BLAS library')
+                return
         elif qdtype == 'uint8' and is_test_for_gpu():
             print('skipped testing quantized_fc for gpu uint8 since it is not supported yet')
             return