diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3b8bbd2e0272..c65e91bbec31 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -187,6 +187,7 @@ if(USE_TENSORRT)
   include_directories(${ONNX_PATH})
   include_directories(3rdparty/onnx-tensorrt/)
   include_directories(3rdparty/)
+  include_directories(3rdparty/onnx-tensorrt/third_party/onnx/)
   add_definitions(-DMXNET_USE_TENSORRT=1)
   add_definitions(-DONNX_NAMESPACE=onnx)
 
diff --git a/Jenkinsfile b/Jenkinsfile
index a7e3dbd9bcc0..89a2c34e3be5 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -60,7 +60,7 @@ mx_cmake_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/li
 mx_cmake_lib_debug = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests'
 mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so, build/3rdparty/mkldnn/src/libmkldnn.so.0'
 mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libiomp5.so, lib/libmkldnn.so.0, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
-mx_tensorrt_lib = 'lib/libmxnet.so, lib/libnvonnxparser_runtime.so.0, lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so'
+mx_tensorrt_lib = 'build/libmxnet.so, lib/libnvonnxparser_runtime.so.0, lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so'
 mx_lib_cpp_examples = 'lib/libmxnet.so, lib/libmxnet.a, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/lenet, build/cpp-package/example/alexnet, build/cpp-package/example/googlenet, build/cpp-package/example/lenet_with_mxdataiter, build/cpp-package/example/resnet, build/cpp-package/example/mlp, build/cpp-package/example/mlp_cpu, build/cpp-package/example/mlp_gpu, build/cpp-package/example/test_score, build/cpp-package/example/test_optimizer'
 mx_lib_cpp_examples_cpu = 'build/libmxnet.so, build/cpp-package/example/mlp_cpu'
diff --git a/ci/docker/install/ubuntu_core.sh b/ci/docker/install/ubuntu_core.sh
index 64f8af3e0444..35430f2d3aa5 100755
--- a/ci/docker/install/ubuntu_core.sh
+++ b/ci/docker/install/ubuntu_core.sh
@@ -26,7 +26,6 @@ apt-get install -y \
     apt-transport-https \
     build-essential \
     ca-certificates \
-    cmake \
     curl \
     git \
     libatlas-base-dev \
@@ -41,3 +40,12 @@ apt-get install -y \
     sudo \
     unzip \
     wget
+
+# Note: we specify an exact cmake version to work around a cmake 3.10 CUDA 10 issue.
+# Reference: /~https://github.com/clab/dynet/issues/1457
+mkdir /opt/cmake && cd /opt/cmake
+wget -nv https://cmake.org/files/v3.12/cmake-3.12.4-Linux-x86_64.sh
+sh cmake-3.12.4-Linux-x86_64.sh --prefix=/opt/cmake --skip-license
+ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+rm cmake-3.12.4-Linux-x86_64.sh
+cmake --version
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 39631f9dc7e6..f2761331b246 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -521,22 +521,23 @@ build_ubuntu_gpu_tensorrt() {
     cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser_runtime.so.0 /work/mxnet/lib/
     cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser.so.0 /work/mxnet/lib/
 
-    rm -rf build
-    make \
-        DEV=1 \
-        ENABLE_TESTCOVERAGE=1 \
-        USE_BLAS=openblas \
-        USE_CUDA=1 \
-        USE_CUDA_PATH=/usr/local/cuda \
-        USE_CUDNN=1 \
-        USE_OPENCV=0 \
-        USE_DIST_KVSTORE=0 \
-        USE_TENSORRT=1 \
-        USE_JEMALLOC=0 \
-        USE_GPERFTOOLS=0 \
-        ONNX_NAMESPACE=onnx \
-        CUDA_ARCH="-gencode arch=compute_70,code=compute_70" \
-        -j$(nproc)
+    cd /work/build
+    cmake -DUSE_CUDA=1 \
+          -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
+          -DCMAKE_C_COMPILER_LAUNCHER=ccache \
+          -DUSE_CUDNN=1 \
+          -DUSE_OPENCV=1 \
+          -DUSE_TENSORRT=1 \
+          -DUSE_OPENMP=0 \
+          -DUSE_MKLDNN=0 \
+          -DUSE_MKL_IF_AVAILABLE=OFF \
+          -DENABLE_TESTCOVERAGE=ON \
+          -DCUDA_ARCH_NAME=Manual \
+          -DCUDA_ARCH_BIN=$CI_CMAKE_CUDA_ARCH_BIN \
+          -G Ninja \
+          /work/mxnet
+
+    ninja -v
 }
 
 build_ubuntu_gpu_mkldnn() {