diff --git a/.gitignore b/.gitignore
index c8a813649bb8..7eb8e7d6e777 100644
--- a/.gitignore
+++ b/.gitignore
@@ -167,13 +167,11 @@ python/.eggs
tests/Makefile
tests/mxnet_unit_tests
-# generated wrappers for ccache
-cc
-cxx
-
# Code coverage related
.coverage
*.gcov
*.gcno
coverage.xml
+# Local CMake build config
+cmake_options.yml
diff --git a/3rdparty/mshadow b/3rdparty/mshadow
index 696803bd7723..6dc04f7c729c 160000
--- a/3rdparty/mshadow
+++ b/3rdparty/mshadow
@@ -1 +1 @@
-Subproject commit 696803bd7723ade8230af878460d96c68a550fbc
+Subproject commit 6dc04f7c729cd5c6c6210d5d4d2026a26ce0bfbf
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 161705643194..3e3de2053477 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -20,7 +20,7 @@ mxnet_option(USE_F16C "Build with x86 F16C instruction support" ON)
mxnet_option(USE_LAPACK "Build with lapack support" ON)
mxnet_option(USE_MKL_IF_AVAILABLE "Use MKL if found" ON)
mxnet_option(USE_MKLML_MKL "Use MKLDNN variant of MKL (if MKL found)" ON IF USE_MKL_IF_AVAILABLE AND (NOT APPLE))
-mxnet_option(USE_MKLDNN "Use MKLDNN variant of MKL (if MKL found)" ON IF USE_MKL_IF_AVAILABLE AND (NOT APPLE))
+mxnet_option(USE_MKLDNN "Use MKLDNN variant of MKL (if MKL found)" ON IF USE_MKL_IF_AVAILABLE AND (NOT APPLE) AND (NOT MSVC) AND (CMAKE_SYSTEM_PROCESSOR MATCHES x86_64))
mxnet_option(USE_OPERATOR_TUNING "Enable auto-tuning of operators" ON IF NOT MSVC)
mxnet_option(USE_GPERFTOOLS "Build with GPerfTools support (if found)" ON)
mxnet_option(USE_JEMALLOC "Build with Jemalloc support" ON)
@@ -215,7 +215,7 @@ if(ENABLE_TESTCOVERAGE)
if(NOT GCOV_PATH)
message(FATAL_ERROR "gcov not found! Aborting...")
endif() # NOT GCOV_PATH
-
+
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --coverage")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --coverage")
set(CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} --coverage")
@@ -227,7 +227,6 @@ if(USE_MKLDNN)
include(cmake/DownloadMKLML.cmake)
# CPU architecture (e.g., C5) can't run on another architecture (e.g., g3).
if(NOT MSVC)
- set(MKLDNN_LIBRARY_TYPE "STATIC" CACHE INTERNAL "" FORCE)
set(ARCH_OPT_FLAGS "-mtune=generic")
else()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /EHsc")
@@ -733,7 +732,12 @@ install(TARGETS ${MXNET_INSTALL_TARGETS}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
)
+# NOTE: Public headers will be installed into ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}, see
+# https://cmake.org/cmake/help/v3.0/variable/CMAKE_INSTALL_PREFIX.html
+# https://cmake.org/cmake/help/v3.0/module/GNUInstallDirs.html
+
install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+install(DIRECTORY 3rdparty/tvm/nnvm/include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
if (INSTALL_EXAMPLES)
install(DIRECTORY example DESTINATION ${CMAKE_INSTALL_DATADIR}/${PROJECT_NAME})
endif()
diff --git a/CODEOWNERS b/CODEOWNERS
index 5a88e89dfb02..8b48257ebf83 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -12,15 +12,18 @@
* @apache/mxnet-committers
# Language bindings
-/R-package/ @thirdwing
-/scala-package/ @yzhliu @nswamy
-/perl-package/ @sergeykolychev
-/python/ @szha
-/contrib/clojure-package/ @gigasquid
+/R-package/ @thirdwing
+/scala-package/ @yzhliu @nswamy @pllarroy
+/perl-package/ @sergeykolychev
+/python/ @szha @pllarroy
+/python/mxnet/kvstore.py @eric-haibin-lin
+/python/mxnet/optimizer/ @eric-haibin-lin
+/python/mxnet/gluon/trainer.py @eric-haibin-lin
+/contrib/clojure-package/ @gigasquid
# C++ base
/src/kvstore/ @rahul003 @anirudh2290
-/include/ @anirudh2290
+/include/ @anirudh2290 @pllarroy
/src/c_api/ @anirudh2290
/src/common/ @anirudh2290
/src/engine/ @anirudh2290
@@ -31,15 +34,20 @@
/src/nnvm/ @anirudh2290
/src/operator/ @anirudh2290
/src/profiler/ @anirudh2290
+/src/kvstore/ @eric-haibin-lin
/src/storage/ @anirudh2290
/tests/cpp/ @anirudh2290
-/cpp-package/ @nswamy
+/cpp-package/ @nswamy @pllarroy
+/src/ @pllarroy
+/plugin/ @pllarroy
# CMake
-CMakeLists.txt @szha @rahul003
-/cmake/ @szha @rahul003
+CMakeLists.txt @szha @rahul003 @pllarroy
+/cmake/ @szha @rahul003 @pllarroy
# MXNet CI
+dev_menu.py @pllarroy
+/ci/ @pllarroy
/tests/ci_build/ @marcoabreu
Jenkinsfile @marcoabreu
.travis.yml @marcoabreu
@@ -50,16 +58,16 @@ Makefile @szha
prepare_mkl.sh @szha
# Docs
-/docs/ @szha
+/docs/ @szha @pllarroy
# Submodules
.gitmodules @szha
# Examples
-/example/ @szha
+/example/ @szha @pllarroy
# Tools
-/tools/ @szha
+/tools/ @szha @pllarroy
# Github templates
/.github/ @szha
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index b9f84d592a70..5b5fdce712f1 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -193,6 +193,7 @@ List of Contributors
* [Yuxi Hu](/~https://github.com/yuxihu)
* [Harsh Patel](/~https://github.com/harshp8l)
* [Xiao Wang](/~https://github.com/BeyonderXX)
+* [Piyush Ghai](/~https://github.com/piyushghai)
Label Bot
---------
diff --git a/LICENSE b/LICENSE
index 10dea2522182..0b4841a8b0dc 100644
--- a/LICENSE
+++ b/LICENSE
@@ -217,21 +217,30 @@
1. MXNet Cpp-package - For details, /cpp-package/LICENSE
2. MXNet rcnn - For details, see, example/rcnn/LICENSE
- 3. scala-package - For details, see, scala-package/LICENSE
+ 3. MXNet scala-package - For details, see, scala-package/LICENSE
4. Warp-CTC - For details, see, 3rdparty/ctc_include/LICENSE
5. 3rdparty/dlpack - For details, see, 3rdparty/dlpack/LICENSE
6. 3rdparty/dmlc-core - For details, see, 3rdparty/dmlc-core/LICENSE
7. 3rdparty/mshadow - For details, see, 3rdparty/mshadow/LICENSE
8. 3rdparty/tvm - For details, see, 3rdparty/tvm/LICENSE
- 9. 3rdparty/tvm/dmlc-core - For details, see, 3rdparty/tvm/dmlc-core/LICENSE
+ 9. 3rdparty/tvm/dmlc-core - For details, see, 3rdparty/tvm/3rdparty/dmlc-core/LICENSE
10. 3rdparty/tvm/dlpack - For details, see, 3rdparty/tvm/3rdparty/dlpack/LICENSE
- 11. 3rdparty/tvm/nnvm - For details, see, 3rdparty/tvm/nnvm/LICENSE
- 12. 3rdparty/ps-lite - For details, see, 3rdparty/ps-lite/LICENSE
- 13. 3rdparty/mkldnn - For details, see, 3rdparty/mkldnn/LICENSE
- 14. googlemock scripts/generator - For details, see, 3rdparty/googletest/googlemock/scripts/generator/LICENSE
- 15. clojure-package - For details, see, contrib/clojure-package/LICENSE
- 16. R-package - For details, see, R-package/LICENSE
- 17. ONNX-TensorRT benchmark package - For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/benchmark/LICENSE
+ 11. 3rdparty/ps-lite - For details, see, 3rdparty/ps-lite/LICENSE
+ 12. 3rdparty/mkldnn - For details, see, 3rdparty/mkldnn/LICENSE
+ 13. googlemock scripts/generator - For details, see, 3rdparty/googletest/googlemock/scripts/generator/LICENSE
+ 14. MXNet clojure-package - For details, see, contrib/clojure-package/LICENSE
+ 15. MXNet R-package - For details, see, R-package/LICENSE
+ 16. ONNX-TensorRT benchmark package - For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/benchmark/LICENSE
+ 17. Dockerfiles - For details, see docker/Dockerfiles/License.md
+ 18. MXNet Julia Package - For details, see julia/LICENSE.md
+ 19. Benchdnn - For details, see 3rdparty/mkldnn/tests/benchdnn/README.md
+ 20. MXNet perl-package - For details, see perl-package/README
+ 21. MXNet perl-package AI-MXNET - For details, see perl-package/AI-MXNet/README
+ 22. MXNet perl-package AI-MXNET Gluon Contrib - For details, see perl-package/AI-MXNet-Gluon-Contrib/README
+ 23. MXNet perl-package AI-MXNET Gluon ModelZoo - For details, see perl-package/AI-MXNet-Gluon-ModelZoo/README
+ 24. MXNet perl-package AI-MXNETCAPI - For details, see perl-package/AI-MXNetCAPI/README
+ 25. MXNet perl-package AI-NNVMCAPI - For details, see perl-package/AI-NNVMCAPI/README
+ 26. Cephes Library Functions - For details, see src/operator/special_functions-inl.h
=======================================================================================
@@ -242,75 +251,64 @@
2. Faster R-CNN - For details, see example/rcnn/LICENSE
3. tree_lstm - For details, see example/gluon/tree_lstm/LICENSE
4. OpenMP - For details, see 3rdparty/openmp/LICENSE.txt
- 5. HalideIR - For details, see nnvm/tvm/HalideIR/LICENSE
6. HalideIR - For details, see 3rdparty/tvm/3rdparty/HalideIR/LICENSE
7. ONNX-TensorRT - For details, see 3rdparty/onnx-tensorrt/LICENSE
8. ONNX-TensorRT - For details, see 3rdparty/onnx-tensorrt/third_party/onnx/LICENSE
+ 9. clipboard.js - Refer to https://zenorocha.github.io/clipboard.js
+ 10. clipboard.min.js - Refer to https://zenorocha.github.io/clipboard.js
=======================================================================================
- NVIDIA Licenses
+ 3-clause BSD licenses
=======================================================================================
- 1. Moderngpu
- For details, see, 3rdparty/ctc_include/contrib/moderngpu/LICENSE
-
- /******************************************************************************
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the NVIDIA CORPORATION nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
- 2. CUB Library
- For details, see, 3rdparty/cub/LICENSE.TXT
+ 1. Xbyak - For details, see 3rdparty/mkldnn/src/cpu/xbyak/COPYRIGHT
+ 2. gtest - For details, see, 3rdparty/mkldnn/tests/gtests/gtest/LICENSE
+ 3. Moderngpu - For details, see, 3rdparty/ctc_include/contrib/moderngpu/LICENSE
+ 4. CUB Library - For details, see, 3rdparty/cub/LICENSE.TXT
+ 5. Googlemock - For details, see, 3rdparty/googletest/googlemock/LICENSE
+ 6. Googletest - For details, see, 3rdparty/googletest/googletest/LICENSE
+ 7. OpenMP Testsuite - For details, see, 3rdparty/openmp/testsuite/LICENSE
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the NVIDIA CORPORATION nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ =======================================================================================
+ 2-clause BSD licenses
+ =======================================================================================
+
+    1. Sphinx JavaScript utilities for the full-text search - For details, see, docs/_static/searchtools_custom.js
+ 2. blockingconcurrentqueue.h - For details, see, 3rdparty/dmlc-core/include/dmlc/blockingconcurrentqueue.h
+ 3. concurrentqueue.h - For details, see, 3rdparty/dmlc-core/include/dmlc/concurrentqueue.h
+ 4. MSCOCO Toolbox - For details, see, example/ssd/dataset/pycocotools/coco.py
+
=======================================================================================
Other Licenses
=======================================================================================
- 1. Caffe
- For details, see, example/rcnn/LICENSE
+ 1. Caffe - For details, see, example/rcnn/LICENSE
+ 2. pool.h - For details, see, src/operator/nn/pool.h
+ 3. pool.cuh - For details, see, src/operator/nn/pool.cuh
+ 4. im2col.h - For details, see, src/operator/nn/im2col.h
+ 5. im2col.cuh - For details, see, src/operator/nn/im2col.cuh
+ 6. deformable_im2col.h - For details, see, src/operator/contrib/nn/deformable_im2col.h
+ 7. deformable_im2col.cuh - For details, see, src/operator/contrib/nn/deformable_im2col.cuh
+
+ COPYRIGHT
+
+ All contributions by the University of California:
+ Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
+ All rights reserved.
+
+ All other contributions:
+ Copyright (c) 2014, 2015, the respective contributors
+ All rights reserved.
+
+ Caffe uses a shared copyright model: each contributor holds copyright over
+ their contributions to Caffe. The project versioning records all such
+ contribution and copyright details. If a contributor wants to further mark
+ their specific copyright on a particular contribution, they should indicate
+ their copyright solely in the commit message of the change when it is
+ committed.
LICENSE
@@ -342,7 +340,7 @@
=======================================================================================
- 2. MS COCO API
+ 8. MS COCO API
For details, see, example/rcnn/LICENSE
Redistribution and use in source and binary forms, with or without
@@ -371,155 +369,14 @@
=======================================================================================
- 3. Sphinx JavaScript utilties for the full-text search
- For details, see, docs/_static/searchtools_custom.js
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- =======================================================================================
-
- 4. FindCrypto.cmake
- For details, see, 3rdparty/dmlc-core/cmake/Modules/FindCrypto.cmake,
- Redistribution and use is allowed according to the terms of the BSD license.
-
- =======================================================================================
-
- 5. Googlemock
- For details, see, 3rdparty/googletest/googlemock/LICENSE
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- =======================================================================================
-
- 6. Googletest
- For details, see, 3rdparty/googletest/googletest/LICENSE
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- =======================================================================================
-
- 7. OpenMP Testsuite
- For details, see, 3rdparty/openmp/testsuite/LICENSE
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- o Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- o Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- o Neither the name of the University of Houston System nor the names of its
- contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- =======================================================================================
-
- 8. Semaphore implementation in blockingconcurrentqueue.h
+ 9. Semaphore implementation in blockingconcurrentqueue.h
This file uses a semaphore implementation under the terms of its separate zlib license.
For details, see, 3rdparty/dmlc-core/include/dmlc/blockingconcurrentqueue.h
=======================================================================================
- 9. blockingconcurrentqueue.h
- This file is Distributed under the terms of the simplified BSD license.
- For details, see, 3rdparty/dmlc-core/include/dmlc/blockingconcurrentqueue.h
-
- =======================================================================================
-
- 10. concurrentqueue.h
- This file is Distributed under the terms of the simplified BSD license.
- For details, see, 3rdparty/dmlc-core/include/dmlc/concurrentqueue.h
-
- =======================================================================================
-
- 11. ONNX Export module
- For details, see, python/mxnet/contrib/onnx/_export/LICENSE
+ 10. ONNX Export module
+ For details, see, python/mxnet/contrib/onnx/mx2onnx/LICENSE
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -568,41 +425,7 @@
=======================================================================================
- 12. Google tests
- For details, see, 3rdparty/mkldnn/tests/gtests/gtest/LICENSE
-
- Copyright 2008, Google Inc.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- =======================================================================================
-
- 13. ONNX python bindings
+ 11. ONNX python bindings
For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/LICENSE
Copyright (c) 2016 Wenzel Jakob , All rights reserved.
@@ -642,3 +465,155 @@
other computer software, distribute, and sublicense such enhancements or
derivative works thereof, in binary and source code form.
+ =======================================================================================
+
+ 12. Clang
+ For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/tools/clang/LICENSE.TXT
+
+ LLVM Release License
+ University of Illinois/NCSA
+ Open Source License
+
+ Copyright (c) 2007-2012 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal with
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ SOFTWARE.
+
+ The LLVM software contains code written by third parties. Such software will
+ have its own individual LICENSE.TXT file in the directory in which it appears.
+ This file will describe the copyrights, license, and restrictions which apply
+ to that code.
+
+ The disclaimer of warranty in the University of Illinois Open Source License
+ applies to all code in the LLVM Distribution, and nothing in any of the
+ other licenses gives permission to use the names of the LLVM Team or the
+ University of Illinois to endorse or promote products derived from this
+ Software.
+
+ The following pieces of software have additional or alternate copyrights,
+ licenses, and/or restrictions:
+
+ Program Directory
+ ------- ---------
+
+
+ =======================================================================================
+
+ 13. MKL BLAS
+ For details, see, [Intel® Simplified license](https://software.intel.com/en-us/license/intel-simplified-software-license).
+
+ Copyright (c) 2018 Intel Corporation.
+
+ Use and Redistribution. You may use and redistribute the software (the “Software”), without modification, provided the following conditions are met:
+
+ * Redistributions must reproduce the above copyright notice and the following terms of use in the Software and in the documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Intel nor the names of its suppliers may be used to endorse or promote products derived from this Software without specific prior written permission.
+
+ * No reverse engineering, decompilation, or disassembly of this Software is permitted.
+
+ Limited patent license. Intel grants you a world-wide, royalty-free, non-exclusive license under patents it now or hereafter owns or controls to make, have made, use, import, offer to sell and sell (“Utilize”) this Software, but solely to the extent that any such patent is necessary to Utilize the Software alone. The patent license shall not apply to any combinations which include this software. No hardware per se is licensed hereunder.
+
+ Third party and other Intel programs. “Third Party Programs” are the files listed in the “third-party-programs.txt” text file that is included with the Software and may include Intel programs under separate license terms. Third Party Programs, even if included with the distribution of the Materials, are governed by separate license terms and those license terms solely govern your use of those programs.
+
+ DISCLAIMER. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT ARE DISCLAIMED. THIS SOFTWARE IS NOT INTENDED FOR USE IN SYSTEMS OR APPLICATIONS WHERE FAILURE OF THE SOFTWARE MAY CAUSE PERSONAL INJURY OR DEATH AND YOU AGREE THAT YOU ARE FULLY RESPONSIBLE FOR ANY CLAIMS, COSTS, DAMAGES, EXPENSES, AND ATTORNEYS’ FEES ARISING OUT OF ANY SUCH USE, EVEN IF ANY CLAIM ALLEGES THAT INTEL WAS NEGLIGENT REGARDING THE DESIGN OR MANUFACTURE OF THE MATERIALS.
+
+ LIMITATION OF LIABILITY. IN NO EVENT WILL INTEL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. YOU AGREE TO INDEMNIFY AND HOLD INTEL HARMLESS AGAINST ANY CLAIMS AND EXPENSES RESULTING FROM YOUR USE OR UNAUTHORIZED USE OF THE SOFTWARE.
+
+ No support. Intel may make changes to the Software, at any time without notice, and is not obligated to support, update or provide training for the Software.
+
+ Termination. Intel may terminate your right to use the Software in the event of your breach of this Agreement and you fail to cure the breach within a reasonable period of time.
+
+ Feedback. Should you provide Intel with comments, modifications, corrections, enhancements or other input (“Feedback”) related to the Software Intel will be free to use, disclose, reproduce, license or otherwise distribute or exploit the Feedback in its sole discretion without any obligations or restrictions of any kind, including without limitation, intellectual property rights or licensing obligations.
+
+ Compliance with laws. You agree to comply with all relevant laws and regulations governing your use, transfer, import or export (or prohibition thereof) of the Software.
+
+ Governing law. All disputes will be governed by the laws of the United States of America and the State of Delaware without reference to conflict of law principles and subject to the exclusive jurisdiction of the state or federal courts sitting in the State of Delaware, and each party agrees that it submits to the personal jurisdiction and venue of those courts and waives any objections. The United Nations Convention on Contracts for the International Sale of Goods (1980) is specifically excluded and will not apply to the Software.
+
+ *Other names and brands may be claimed as the property of others.
+
+ =======================================================================================
+
+ 14. FindJeMalloc.cmake
+ For details, see, cmake/Modules/FindJeMalloc.cmake
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+
+ Copyright (c) 2014 Thomas Heller
+ Copyright (c) 2007-2012 Hartmut Kaiser
+ Copyright (c) 2010-2011 Matt Anderson
+ Copyright (c) 2011 Bryce Lelbach
+
+ Distributed under the Boost Software License, Version 1.0.
+ Boost Software License - Version 1.0 - August 17th, 2003
+
+ Permission is hereby granted, free of charge, to any person or organization
+ obtaining a copy of the software and accompanying documentation covered by
+ this license (the "Software") to use, reproduce, display, distribute,
+ execute, and transmit the Software, and to prepare derivative works of the
+ Software, and to permit third-parties to whom the Software is furnished to
+ do so, all subject to the following:
+
+ The copyright notices in the Software and this entire statement, including
+ the above license grant, this restriction and the following disclaimer,
+ must be included in all copies of the Software, in whole or in part, and
+ all derivative works of the Software, unless such copies or derivative
+ works are solely in the form of machine-executable object code generated by
+ a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
diff --git a/MKLDNN_README.md b/MKLDNN_README.md
index 2618d23388e7..ecb721e5fffe 100644
--- a/MKLDNN_README.md
+++ b/MKLDNN_README.md
@@ -1,9 +1,9 @@
# Build/Install MXNet with MKL-DNN
-A better training and inference perforamce are expected to achieved on Intel-Architecture CPUs with MXNET built with [Intel MKL-DNN](/~https://github.com/intel/mkl-dnn) on multiple operating system, including Linux, Windows and MacOS.
-In the following sections, you will find building instructions for MXNET with Intel MKL-DNN on Linux, MacOS and Windows.
+A better training and inference performance is expected to be achieved on Intel-Architecture CPUs with MXNet built with [Intel MKL-DNN](/~https://github.com/intel/mkl-dnn) on multiple operating systems, including Linux, Windows and MacOS.
+In the following sections, you will find build instructions for MXNet with Intel MKL-DNN on Linux, MacOS and Windows.
-The detailed performance data collected on Intel Xeon CPU with MXNET built with Intel MKL-DNN can be found at [here](https://mxnet.incubator.apache.org/faq/perf.html#intel-cpu).
+The detailed performance data collected on Intel Xeon CPU with MXNet built with Intel MKL-DNN can be found [here](https://mxnet.incubator.apache.org/faq/perf.html#intel-cpu).
-On Windows, you can use [Micrsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) and [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/) to compile MXNET with Intel MKL-DNN.
+On Windows, you can use [Microsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) and [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/) to compile MXNet with Intel MKL-DNN.
[Micrsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) is recommended.
**Visual Studio 2015**
@@ -123,7 +123,7 @@ cmake -G "Visual Studio 14 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -D
These commands produce a library called ```libmxnet.dll``` in the ```./build/Release/``` or ```./build/Debug``` folder.
Also ```libmkldnn.dll``` with be in the ```./build/3rdparty/mkldnn/src/Release/```
-6. Make sure that all the dll files used above(such as `libmkldnn.dll`, `libmklml.dll`, `libiomp5.dll`, `libopenblas.dll`, etc) are added to the system PATH. For convinence, you can put all of them to ```\windows\system32```. Or you will come across `Not Found Dependencies` when loading mxnet.
+6. Make sure that all the dll files used above (such as `libmkldnn.dll`, `libmklml.dll`, `libiomp5.dll`, `libopenblas.dll`, etc.) are added to the system PATH. For convenience, you can put all of them to ```\windows\system32```. Or you will come across `Not Found Dependencies` when loading MXNet.
**Visual Studio 2017**
@@ -177,7 +177,7 @@ cmake -G "Visual Studio 15 2017 Win64" .. -T host=x64 -DUSE_CUDA=0 -DUSE_CUDNN=0
msbuild mxnet.sln /p:Configuration=Release;Platform=x64 /maxcpucount
```
-9. Make sure that all the dll files used above(such as `libmkldnn.dll`, `libmklml.dll`, `libiomp5.dll`, `libopenblas.dll`, etc) are added to the system PATH. For convinence, you can put all of them to ```\windows\system32```. Or you will come across `Not Found Dependencies` when loading mxnet.
+9. Make sure that all the dll files used above (such as `libmkldnn.dll`, `libmklml.dll`, `libiomp5.dll`, `libopenblas.dll`, etc.) are added to the system PATH. For convenience, you can put all of them to ```\windows\system32```. Or you will come across `Not Found Dependencies` when loading MXNet.
Verify MXNet with python
diff --git a/Makefile b/Makefile
index 31722e86c085..42010e42b08c 100644
--- a/Makefile
+++ b/Makefile
@@ -18,12 +18,11 @@
ROOTDIR = $(CURDIR)
TPARTYDIR = $(ROOTDIR)/3rdparty
-SCALA_VERSION_PROFILE := scala-2.11
-
ifeq ($(OS),Windows_NT)
UNAME_S := Windows
else
UNAME_S := $(shell uname -s)
+ UNAME_P := $(shell uname -p)
endif
ifndef config
@@ -60,6 +59,16 @@ endif
# use customized config file
include $(config)
+ifndef USE_MKLDNN
+ifneq ($(UNAME_S), Darwin)
+ifneq ($(UNAME_S), Windows)
+ifeq ($(UNAME_P), x86_64)
+ USE_MKLDNN=1
+endif
+endif
+endif
+endif
+
ifeq ($(USE_MKL2017), 1)
$(warning "USE_MKL2017 is deprecated. We will switch to USE_MKLDNN.")
USE_MKLDNN=1
@@ -132,12 +141,7 @@ ifeq ($(USE_MKLDNN), 1)
LDFLAGS += -L$(MKLROOT)/lib
endif
CFLAGS += -I$(MKLDNNROOT)/include
- # MKLDNN but to needs to be dynamically linked for windows as not all VS compilers support static linking
- ifneq ($(UNAME_S), Windows)
- LIB_DEP += $(MKLDNNROOT)/lib/libmkldnn.a
- else
- LDFLAGS += -L$(MKLDNNROOT)/lib -lmkldnn -Wl,-rpath,'$${ORIGIN}'
- endif
+ LDFLAGS += -L$(MKLDNNROOT)/lib -lmkldnn -Wl,-rpath,'$${ORIGIN}'
endif
# setup opencv
@@ -402,18 +406,13 @@ PLUGIN_OBJ =
PLUGIN_CUOBJ =
include $(MXNET_PLUGINS)
-ifeq ($(UNAME_S), Windows)
- # TODO(yizhi) currently scala package does not support windows
- SCALA_PKG_PROFILE := windows
-else
+ifneq ($(UNAME_S), Windows)
ifeq ($(UNAME_S), Darwin)
WHOLE_ARCH= -all_load
NO_WHOLE_ARCH= -noall_load
- SCALA_PKG_PROFILE := osx-x86_64
else
WHOLE_ARCH= --whole-archive
NO_WHOLE_ARCH= --no-whole-archive
- SCALA_PKG_PROFILE := linux-x86_64
endif
endif
@@ -432,7 +431,6 @@ ifeq ($(USE_CUDA), 1)
# Make sure to add stubs as fallback in order to be able to build
# without full CUDA install (especially if run without nvidia-docker)
LDFLAGS += -L/usr/local/cuda/lib64/stubs
- SCALA_PKG_PROFILE := $(SCALA_PKG_PROFILE)-gpu
ifeq ($(USE_NCCL), 1)
ifneq ($(USE_NCCL_PATH), NONE)
CFLAGS += -I$(USE_NCCL_PATH)/include
@@ -444,7 +442,6 @@ ifeq ($(USE_CUDA), 1)
CFLAGS += -DMXNET_USE_NCCL=0
endif
else
- SCALA_PKG_PROFILE := $(SCALA_PKG_PROFILE)-cpu
CFLAGS += -DMXNET_USE_NCCL=0
endif
@@ -459,6 +456,10 @@ else
CFLAGS += -DMXNET_USE_LIBJPEG_TURBO=0
endif
+ifeq ($(CI), 1)
+ MAVEN_ARGS := -B
+endif
+
# For quick compile test, used smaller subset
ALLX_DEP= $(ALL_DEP)
@@ -468,7 +469,7 @@ build/src/%.o: src/%.cc | mkldnn
build/src/%_gpu.o: src/%.cu | mkldnn
@mkdir -p $(@D)
- $(NVCC) $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS)" -M -MT build/src/$*_gpu.o $< >build/src/$*_gpu.d
+ $(NVCC) $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS)" --generate-dependencies -MT build/src/$*_gpu.o $< >build/src/$*_gpu.d
$(NVCC) -c -o $@ $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS)" $<
# A nvcc bug cause it to generate "generic/xxx.h" dependencies from torch headers.
@@ -484,7 +485,7 @@ build/plugin/%.o: plugin/%.cc
%_gpu.o: %.cu
@mkdir -p $(@D)
- $(NVCC) $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS) -Isrc/operator" -M -MT $*_gpu.o $< >$*_gpu.d
+ $(NVCC) $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS) -Isrc/operator" --generate-dependencies -MT $*_gpu.o $< >$*_gpu.d
$(NVCC) -c -o $@ $(NVCCFLAGS) $(CUDA_ARCH) -Xcompiler "$(CFLAGS) -Isrc/operator" $<
%.o: %.cc $(CORE_INC)
@@ -590,19 +591,19 @@ rpkg:
cp -rf lib/libmxnet.so R-package/inst/libs
mkdir -p R-package/inst/include
cp -rf include/* R-package/inst/include
+ rm R-package/inst/include/dmlc
+ rm R-package/inst/include/nnvm
cp -rf 3rdparty/dmlc-core/include/* R-package/inst/include/
cp -rf 3rdparty/tvm/nnvm/include/* R-package/inst/include
Rscript -e "if(!require(devtools)){install.packages('devtools', repo = 'https://cloud.r-project.org/')}"
+ Rscript -e "if(!require(devtools)||packageVersion('roxygen2') < '6.1.1'){install.packages('roxygen2', repo = 'https://cloud.r-project.org/')}"
Rscript -e "library(devtools); library(methods); options(repos=c(CRAN='https://cloud.r-project.org/')); install_deps(pkg='R-package', dependencies = TRUE)"
cp R-package/dummy.NAMESPACE R-package/NAMESPACE
echo "import(Rcpp)" >> R-package/NAMESPACE
R CMD INSTALL R-package
- Rscript -e "if (!require('roxygen2')||packageVersion('roxygen2') < '5.0.1'){\
- devtools::install_version('roxygen2',version='5.0.1',\
- repos='https://cloud.r-project.org/',quiet=TRUE)}"
Rscript -e "require(mxnet); mxnet:::mxnet.export('R-package'); warnings()"
rm R-package/NAMESPACE
- Rscript -e "require(roxygen2); roxygen2::roxygenise('R-package'); warnings()"
+ Rscript -e "devtools::document('R-package'); warnings()"
R CMD INSTALL R-package
rpkgtest:
@@ -610,80 +611,22 @@ rpkgtest:
Rscript -e 'res<-covr:::package_coverage("R-package");fileConn<-file(paste("r-package_coverage_",toString(runif(1)),".json"));writeLines(covr:::to_codecov(res), fileConn);close(fileConn)'
scalaclean:
- (cd $(ROOTDIR)/scala-package && \
- mvn clean -P$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE))
-
-scalatestcompile:
- (cd $(ROOTDIR)/scala-package && \
- mvn test-compile -P$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) -Dcxx="$(CXX)" \
- -Dbuild.platform="$(SCALA_PKG_PROFILE)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dcurrent_libdir="$(ROOTDIR)/lib" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a")
+ (cd $(ROOTDIR)/scala-package && mvn clean)
scalapkg:
- (cd $(ROOTDIR)/scala-package && \
- mvn package -P$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) -Dcxx="$(CXX)" \
- -Dbuild.platform="$(SCALA_PKG_PROFILE)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dcurrent_libdir="$(ROOTDIR)/lib" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a")
+ (cd $(ROOTDIR)/scala-package && mvn install -DskipTests)
+
+scalainstall:
+ (cd $(ROOTDIR)/scala-package && mvn install)
scalaunittest:
- (cd $(ROOTDIR)/scala-package && \
- mvn integration-test -P$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE),unittest -Dcxx="$(CXX)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a" $(SCALA_TEST_ARGS))
+ (cd $(ROOTDIR)/scala-package && mvn install)
scalaintegrationtest:
- (cd $(ROOTDIR)/scala-package && \
- mvn integration-test -P$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE),integrationtest -Dcxx="$(CXX)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a" $(SCALA_TEST_ARGS))
-
-scalainstall:
- (cd $(ROOTDIR)/scala-package && \
- mvn install -P$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) -DskipTests=true -Dcxx="$(CXX)" \
- -Dbuild.platform="$(SCALA_PKG_PROFILE)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a")
-
-scalarelease-dryrun:
- (cd $(ROOTDIR)/scala-package && \
- mvn release:clean release:prepare -DdryRun=true -DautoVersionSubmodules=true \
- -Papache-release,$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) \
- -Darguments=""-Dbuild\.platform=\""$(SCALA_PKG_PROFILE)\""\ -DskipTests=true\ -Dcflags=\""$(CFLAGS)\""\ -Dcxx=\""$(CXX)\""\ -Dldflags=\""$(LDFLAGS)\""\ -Dlddeps=\""$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a\"""")
-
-scalarelease-prepare:
- (cd $(ROOTDIR)/scala-package && \
- mvn release:clean release:prepare -DautoVersionSubmodules=true \
- -Papache-release,$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) \
- -Darguments=""-Dbuild\.platform=\""$(SCALA_PKG_PROFILE)\""\ -DskipTests=true\ -Dcflags=\""$(CFLAGS)\""\ -Dcxx=\""$(CXX)\""\ -Dldflags=\""$(LDFLAGS)\""\ -Dlddeps=\""$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a\"""")
-
-scalarelease-perform:
- (cd $(ROOTDIR)/scala-package && \
- mvn release:perform -DautoVersionSubmodules=true \
- -Papache-release,$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) \
- -Darguments=""-Dbuild\.platform=\""$(SCALA_PKG_PROFILE)\""\ -DskipTests=true\ -Dcflags=\""$(CFLAGS)\""\ -Dcxx=\""$(CXX)\""\ -Dldflags=\""$(LDFLAGS)\""\ -Dlddeps=\""$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a\"""")
-
-scaladeploy:
- (cd $(ROOTDIR)/scala-package && \
- mvn deploy -Papache-release,$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) \-DskipTests=true -Dcxx="$(CXX)" \
- -Dbuild.platform="$(SCALA_PKG_PROFILE)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a")
-
-scaladeploylocal:
- (cd $(ROOTDIR)/scala-package && \
- mvn deploy -Papache-release,deployLocal,$(SCALA_PKG_PROFILE),$(SCALA_VERSION_PROFILE) \-DskipTests=true -Dcxx="$(CXX)" \
- -DaltDeploymentRepository=snapshot-repo::default::file:local-snapshot \
- -Dgpg.skip \
- -Dbuild.platform="$(SCALA_PKG_PROFILE)" \
- -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \
- -Dlddeps="$(LIB_DEP) $(ROOTDIR)/lib/libmxnet.a")
+ (cd $(ROOTDIR)/scala-package && mvn integration-test -DskipTests=false)
jnilint:
- 3rdparty/dmlc-core/scripts/lint.py mxnet-jnicpp cpp scala-package/native/src
+ 3rdparty/dmlc-core/scripts/lint.py mxnet-jnicpp cpp scala-package/native/src --exclude_path scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h
rclean:
$(RM) -r R-package/src/image_recordio.h R-package/NAMESPACE R-package/man R-package/R/mxnet_generated.R \
@@ -691,7 +634,7 @@ rclean:
ifneq ($(EXTRA_OPERATORS),)
clean: rclean cyclean $(EXTRA_PACKAGES_CLEAN)
- $(RM) -r build lib bin *~ */*~ */*/*~ */*/*/*~
+ $(RM) -r build lib bin deps *~ */*~ */*/*~ */*/*/*~
cd $(DMLC_CORE); $(MAKE) clean; cd -
cd $(PS_PATH); $(MAKE) clean; cd -
cd $(NNVM_PATH); $(MAKE) clean; cd -
diff --git a/NEWS.md b/NEWS.md
index 68cb2b053aec..f06cc35d8b0f 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,6 +1,579 @@
MXNet Change Log
================
+## 1.4.0
+
+- [New Features](#new-features)
+ * [Java Inference API](#java-inference-api)
+ * [Julia API](#julia-api)
+ * [Control Flow Operators (experimental)](#control-flow-operators--experimental-)
+ * [SVRG Optimization](#svrg-optimization)
+ * [Subgraph API (experimental)](#subgraph-api--experimental-)
+ * [JVM Memory Management](#jvm-memory-management)
+ * [Topology-aware AllReduce (experimental)](#topology-aware-allreduce--experimental-)
+ * [MKLDNN backend: Graph optimization and Quantization (experimental)](#mkldnn-backend--graph-optimization-and-quantization--experimental-)
+ + [Graph Optimization](#graph-optimization)
+ + [Quantization](#quantization)
+- [New Operators](#new-operators)
+- [Feature improvements](#feature-improvements)
+ * [Operator](#operator)
+ * [Optimizer](#optimizer)
+ * [Sparse](#sparse)
+ * [ONNX](#onnx)
+ * [MKLDNN](#mkldnn)
+ * [Inference](#inference)
+ * [Other](#other)
+- [Frontend API updates](#frontend-api-updates)
+ * [Gluon](#gluon)
+ * [Symbol](#symbol)
+- [Language API updates](#language-api-updates)
+ * [Java](#java)
+ * [R](#r)
+ * [Scala](#scala)
+ * [Clojure](#clojure)
+ * [Perl](#perl)
+ * [Julia](#julia)
+- [Performance benchmarks and improvements](#performance-benchmarks-and-improvements)
+- [Bug fixes](#bug-fixes)
+- [Licensing updates](#licensing-updates)
+- [Improvements](#improvements)
+ * [Tutorial](#tutorial)
+ * [Example](#example)
+ * [Documentation](#documentation)
+ * [Website](#website)
+ * [MXNet Distributions](#mxnet-distributions)
+ * [Installation](#installation)
+ * [Build and CI](#build-and-ci)
+ * [3rd party](#3rd-party)
+ + [TVM:](#tvm-)
+ + [CUDNN:](#cudnn-)
+ + [Horovod:](#horovod-)
+- [Deprecations](#deprications)
+- [Other](#other-1)
+- [How to build MXNet](#how-to-build-mxnet)
+- [List of submodules used by Apache MXNet (Incubating) and when they were updated last](#list-of-submodules-used-by-apache-mxnet--incubating--and-when-they-were-updated-last)
+### New Features
+#### Java Inference API
+
+Model inference is often managed in a production ecosystem using primarily Java/Scala tools and frameworks. This release seeks to alleviate the need for software engineers to write custom MXNet wrappers to fit their production environment.
+
+Inference on a trained model has a couple of common use cases:
+
+ 1. Real-time or Online Inference - tasks that require immediate feedback, such as fraud detection
+ 2. Batch or Offline Inference - tasks that don't require immediate feedback, these are use cases where you have massive amounts of data and want to run inference or pre-compute inference results
+Real-time Inference is often performed and deployed on popular web frameworks such as Tomcat, Netty, Jetty, etc., all of which use Java.
+Batch Inference is often performed on big data platforms such as Spark using Scala or Java.
+
+With this project, we had the following goals:
+* Build a new set of APIs that are Java friendly, compatible with Java 7+, are easy to use for inference.
+* Lower the barrier to entry of consuming MXNet for production use cases.
+
+More details can be found at the [Java Inference API document](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Java+Inference+API).
+
+#### Julia API
+
+MXNet.jl is the Julia package of Apache MXNet. MXNet.jl brings flexible and efficient GPU computing and state-of-art deep learning to Julia. Some highlights of features include:
+
+ * Efficient tensor/matrix computation across multiple devices, including multiple CPUs, GPUs and distributed server nodes.
+ * Flexible manipulation of symbolic to composite for construction of state-of-the-art deep learning models.
+
+#### Control Flow Operators (experimental)
+
+Today we observe more and more dynamic neural network models, especially in the fields of natural language processing and graph analysis. The dynamics in these models come from multiple sources, including:
+
+ * Models are expressed with control flow, such as conditions and loops;
+ * NDArrays in a model may have dynamic shapes, meaning the NDArrays of a model or some of the NDArrays have different shapes for different batches;
+ * Models may want to use more dynamic data structures, such as lists or dictionaries.
+It's natural to express dynamic models in frameworks with an imperative programming interface (e.g., Gluon, Pytorch, TensorFlow Eager). In this kind of interface, developers can use Python control flows, or NDArrays with any shape at any moment, or use Python lists and dictionaries to store data as they want. The problem of this approach is that it highly dependent on the originating front-end programming language (mainly Python). A model implemented in one language can only run in the same language.
+
+A common use case is that machine learning scientists want to develop their models in Python, whereas engineers who deploy the models usually have to use a different "production" language (e.g., Java or C). Gluon tries to close the gap between the model development and production deployment. Machine learning scientists design and implement their models in Python with the imperative interface, and then Gluon converts the implementations from imperative to symbolic by invoking `hybridize()` for model exporting.
+
+The goal of this project is to enhance Gluon to turn a dynamic neural network into a static computation graph. The dynamic control flows are expressed by control flow operators with Gluon hybridization, and these are exported for deployment.
+
+More information can be found at [Optimize dynamic neural network models with control flow operators](https://cwiki.apache.org/confluence/display/MXNET/Optimize+dynamic+neural+network+models+with+control+flow+operators)
+
+#### SVRG Optimization
+
+SVRG stands for Stochastic Variance Reduced Gradient, which was first introduced in the paper [Accelerating Stochastic Gradient Descent using Predictive Variance Reduction in 2013](https://papers.nips.cc/paper/4937-accelerating-stochastic-gradient-descent-using-predictive-variance-reduction.pdf). It is an optimization technique that complements SGD.
+
+SGD is known for large scale optimization, but it suffers from slow convergence asymptotically due to the inherent variance. SGD approximates the full gradient using a small batch of samples which introduces variance. In order to converge faster, SGD often needs to start with a smaller learning rate.
+
+SVRG remedies the slow convergence problem by keeping a version of the estimated weights that is close to the optimal parameters and maintains the average of the full gradient over the full pass of data. The average of the full gradients of all data is calculated w.r.t to parameters of last mth epochs. It has provable guarantees for strongly convex smooth functions; a detailed proof can be found in section 3 of the [paper](https://papers.nips.cc/paper/4937-accelerating-stochastic-gradient-descent-using-predictive-variance-reduction.pdf). SVRG uses a different update rule than SGD: gradients w.r.t current parameters minus gradients w.r.t parameters from the last mth epoch, plus the average of gradients over all data.
+
+Key Characteristics of SVRG:
+
+ * Explicit variance reduction
+ * Ability to use relatively large learning rate compared to SGD, which leads to faster convergence.
+More details can be found at [SVRG Optimization in MXNet Python Module](https://cwiki.apache.org/confluence/display/MXNET/Unified+integration+with+external+backend+libraries)
+
+#### Subgraph API (experimental)
+
+MXNet can integrate with many different kinds of backend libraries, including TVM, MKLDNN, TensorRT, Intel nGraph and more. In general, these backends support a limited number of operators, so running computation in a model usually involves an interaction between backend-supported operators and MXNet operators. These backend libraries share some common requirements:
+
+TVM, MKLDNN and nGraph use customized data formats. Interaction between these backends with MXNet requires data format conversion.
+TVM, MKLDNN, TensorRT and nGraph fuse operators.
+Integration with these backends should happen in the granularity of subgraphs instead of in the granularity of operators. To fuse operators, it's obvious that we need to divide a graph into subgraphs so that the operators in a subgraph can be fused into a single operator. To handle customized data formats, we should partition a computation graph into subgraphs as well. Each subgraph contains only TVM, MKLDNN or nGraph operators. In this way, MXNet converts data formats only when entering such a subgraph, and the operators inside a subgraph handle format conversion themselves if necessary. This makes interaction of TVM and MKLDNN with MXNet much easier. Neither the MXNet executor nor the MXNet operators need to deal with customized data formats. Even though invoking these libraries from MXNet requires similar steps, the partitioning rule and the subgraph execution of these backends can be different. As such, we define the following interface for backends to customize graph partitioning and subgraph execution inside an operator. More details can be found at PR 12157 and [Subgraph API](https://cwiki.apache.org/confluence/display/MXNET/Unified+integration+with+external+backend+libraries).
+
+#### JVM Memory Management
+
+The MXNet Scala and Java API uses native memory to manage NDArray, Symbol, Executor, DataIterators using MXNet's internal C APIs. The C APIs provide appropriate interfaces to create, access and free these objects. MXNet Scala has corresponding Wrappers and APIs that have pointer references to the native memory. Before this project, JVM users (e.g. Scala, Clojure, or Java) of MXNet have to manage MXNet objects manually using the dispose pattern. There are a few usability problems with this approach:
+
+* Users have to track the MXNet objects manually and remember to call `dispose`. This is not Java idiomatic and not user friendly. Quoting a user: "this feels like I am writing C++ code which I stopped ages ago".
+* Leads to memory leaks if `dispose` is not called.
+* Many objects in MXNet-Scala are managed in native memory, needing to use `dispose` on them as well.
+* Bloated code with `dispose()` methods.
+* Hard to debug memory-leaks.
+Goals of the project are:
+* Provide MXNet JVM users automated memory management that can release native memory when there are no references to JVM objects.
+* Provide automated memory management for both GPU and CPU memory without performance degradation. More details can be found here: [JVM Memory Management](https://cwiki.apache.org/confluence/display/MXNET/JVM+Memory+Management)
+
+#### Topology-aware AllReduce (experimental)
+For distributed training, the `Reduce` communication patterns used by NCCL and MXNet are not optimal for small batch sizes. The `Topology-aware AllReduce` approach is based on the idea of using trees to perform the `Reduce` and `Broadcast` operations. We can use the idea of minimum spanning trees to do a binary tree `Reduce` communication pattern to improve distributed training following this paper by Wang, Li, Edo and Smola [1]. Our strategy is to use:
+
+ * a single tree (latency-optimal for small messages) to handle `Reduce` on small messages
+ * multiple trees (bandwidth-optimal for large messages) to handle `Reduce` on large messages
+
+More details can be found here: [Topology-aware AllReduce](https://cwiki.apache.org/confluence/display/MXNET/Single+machine+All+Reduce+Topology-aware+Communication)
+Note: This is an experimental feature and has known problems - see [13341](/~https://github.com/apache/incubator-mxnet/issues/13341). Please help to contribute to improve the robustness of the feature.
+
+#### MKLDNN backend: Graph optimization and Quantization (experimental)
+
+Two advanced features, graph optimization (operator fusion) and reduced-precision (INT8) computation, are introduced to MKLDNN backend in this release ([#12530](/~https://github.com/apache/incubator-mxnet/pull/12530), [#13297](/~https://github.com/apache/incubator-mxnet/pull/13297), [#13260](/~https://github.com/apache/incubator-mxnet/pull/13260)).
+These features significantly boost the inference performance on CPU (up to 4X) for a broad range of deep learning topologies. Currently, this feature is only available for inference on platforms with [supported Intel CPUs](/~https://github.com/intel/mkl-dnn#system-requirements).
+
+##### Graph Optimization
+MKLDNN backend takes advantage of MXNet subgraph to implement the most of possible operator fusions for inference, such as Convolution + ReLU, Batch Normalization folding, etc. When using mxnet-mkl package, users can easily enable this feature by setting export MXNET_SUBGRAPH_BACKEND=MKLDNN.
+
+##### Quantization
+Performance of reduced-precision (INT8) computation is also dramatically improved after the graph optimization feature is applied on CPU Platforms. Various models are supported and can benefit from reduced-precision computation, including symbolic models, Gluon models and even custom models. Users can run most of the pre-trained models with only a few lines of commands and a new quantization script imagenet_gen_qsym_mkldnn.py. The observed accuracy loss is less than 0.5% for popular CNN networks, like ResNet-50, Inception-BN, MobileNet, etc.
+
+Please find detailed information and performance/accuracy numbers here: [MKLDNN README](/~https://github.com/apache/incubator-mxnet/blob/master/MKLDNN_README.md), [quantization README](/~https://github.com/apache/incubator-mxnet/tree/master/example/quantization#1) and [design proposal](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN)
+
+### New Operators
+
+* Add trigonometric operators (#12424)
+* [MXNET-807] Support integer label type in ctc_loss operator (#12468)
+* [MXNET-876] make CachedOp a normal operator (#11641)
+* Add index_copy() operator (#12810)
+* Fix getnnz operator for CSR matrix (#12908) - issue #12872
+* [MXNET-1173] Debug operators - isfinite, isinf and isnan (#12967)
+* Add sample_like operators (#13034)
+* Add gauss err function operator (#13229)
+* [MXNET -1030] Enhanced Cosine Embedding Loss (#12750)
+* Add bytearray support back to imdecode (#12855, #12868) (#12912)
+* Add Psroipooling CPU implementation (#12738)
+
+### Feature improvements
+#### Operator
+* [MXNET-912] Refactoring ctc loss operator (#12637)
+* Refactor L2_normalization (#13059)
+* Customized and faster `TakeOpForward` operator on CPU (#12997)
+* Allow stop of arange operator to be inferred from dims. (#12064)
+* Make check_isfinite, check_scale optional in clip_global_norm (#12042) add FListInputNames attribute to softmax_cross_entropy (#12701) [MXNET-867] Pooling1D with same padding (#12594)
+* Add support for more req patterns for bilinear sampler backward (#12386) [MXNET-882] Support for N-d arrays added to diag op. (#12430)
+
+#### Optimizer
+* Add a special version of Adagrad optimizer with row-wise learning rate (#12365)
+* Add a Python SVRGModule for performing SVRG Optimization Logic (#12376)
+
+#### Sparse
+
+* Fall back when sparse arrays are passed to MKLDNN-enabled operators (#11664)
+* Add Sparse support for logic operators (#12860)
+* Add Sparse support for take(csr, axis=0) (#12889)
+
+#### ONNX
+
+* ONNX export - Clip operator (#12457)
+* ONNX version update from 1.2.1 to 1.3 in CI (#12633)
+* Use modern ONNX API to load a model from file (#12777)
+* [MXNET-892] ONNX export/import: DepthToSpace, SpaceToDepth operators (#12731)
+* ONNX export: Fully connected operator w/o bias, ReduceSum, Square (#12646)
+* ONNX export/import: Selu (#12785)
+* ONNX export: Cleanup (#12878)
+* [MXNET-892] ONNX export/import: DepthToSpace, SpaceToDepth operators (#12731)
+* ONNX export: Scalar, Reshape - Set appropriate tensor type (#13067)
+* [MXNET-886] ONNX export: HardSigmoid, Less, Greater, Equal (#12812)
+
+#### MKLDNN
+
+* MKLDNN Forward FullyConnected op cache (#11611)
+* [MXNET-753] Fallback when using non-MKLDNN supported operators (#12019)
+* MKLDNN Backward op cache (#11301)
+* Implement mkldnn convolution fusion and quantization. (#12530)
+* Improve mkldnn fallback. (#12663)
+* Update MKL-DNN dependency (#12953)
+* Update MKLML dependency (#13181)
+* [MXNET-33] Enhance mkldnn pooling to support full convention (#11047)
+
+#### Inference
+* [MXNET-910] Multithreading inference. (#12456)
+* Tweaked the copy in c_predict_api.h (#12600)
+
+#### Other
+* support for upper triangular matrices in linalg (#12904)
+* Introduce Random module / Refactor code generation (#13038)
+* [MXNET-779]Add DLPack Transformation API (#12047)
+* Draw label name next to corresponding bounding boxes when the mapping of id to names is specified (#9496)
+* Track epoch metric separately (#12182)
+* Set correct update on kvstore flag in dist_device_sync mode (#12786)
+
+### Frontend API updates
+
+#### Gluon
+
+* Update basic_layers.py (#13299)
+* Gluon LSTM Projection and Clipping Support (#13056)
+* Make Gluon download function to be atomic (#12572)
+* [MXNET -1004] Poisson NegativeLog Likelihood loss (#12697)
+* Add activation information for `mxnet.gluon.nn._Conv` (#12354)
+* Gluon DataLoader: avoid recursionlimit error (#12622)
+
+#### Symbol
+* Addressed duplicate object reference issues (#13214)
+* Throw exception if MXSymbolInferShape fails (#12733)
+* Infer dtype in SymbolBlock import from input symbol (#12412)
+
+### Language API updates
+#### Java
+* [MXNET-1198] MXNet Java API (#13162)
+
+#### R
+* Refactor R Optimizers to fix memory leak - 11374
+* Add new Vignettes to the R package
+ * Char-level Language modeling - 12670
+ * Multidimensional Time series forecasting - 12664
+* Fix broken Examples and tutorials
+ * Tutorial on neural network introduction - 12117
+ * CGAN example - 12283
+ * Test classification with LSTMs - 12263
+
+#### Scala
+* Explain the details for Scala Experimental (#12348)
+* [MXNET-716] Adding Scala Inference Benchmarks (#12721)
+* [MXNET-716][MIRROR #12723] Scala Benchmark Extension pack (#12758)
+* NativeResource Management in Scala (#12647)
+* Ignore generated Scala files (#12928)
+* Use ResourceScope in Model/Trainer/FeedForward.scala (#12882)
+* [MXNET-1180] Scala Image API (#12995)
+* Update log4j version of Scala package (#13131)
+* Review require() usages to add meaningful messages (#12570)
+* Fix Scala readme (#13082)
+
+#### Clojure
+* Introduction to Clojure-MXNet video link (#12754)
+* Improve the Clojure Package README to Make it Easier to Get Started (#12881)
+* MXNET-873 - Bring Clojure Package Inline with New DataDesc and Layout in Scala Package (#12387)
+* Port of Scala Image API to Clojure (#13107)
+
+#### Perl
+* [MXNET-1026] [Perl] Sync with recent changes in Python's API (#12739)
+
+#### Julia
+* Import Julia binding (#10149), how to use is available at /~https://github.com/apache/incubator-mxnet/tree/master/julia
+
+### Performance benchmarks and improvements
+* Update mshadow for omp acceleration when nvcc is not present (#12674)
+* [MXNET-860] Avoid implicit double conversions (#12361)
+* Add more models to benchmark_score (#12780)
+* Add resnet50-v1 to benchmark_score (#12595)
+
+### Bug fixes
+* Fix for #10920 - increase tolerance for sparse dot (#12527)
+* [MXNET-1234] Fix shape inference problems in Activation backward (#13409)
+* Fix a bug in `where` op with 1-D input (#12325)
+* [MXNET-825] Fix CGAN R Example with MNIST dataset (#12283)
+* [MXNET-535] Fix bugs in LR Schedulers and add warmup (#11234)
+* Fix speech recognition example (#12291)
+* Fix bug in 'device' type kvstore (#12350)
+* fix search result 404s (#12414)
+* Fix help in imread (#12420)
+* Fix render issue on < and > (#12482)
+* [MXNET-853] Fix for smooth_l1 operator scalar default value (#12284)
+* Fix subscribe links, remove disabled icons (#12474)
+* Fix broken URLs (#12508)
+* Fix/public internal header (#12374)
+* Fix lazy record io when used with dataloader and multi_worker > 0 (#12554)
+* Fix error in try/finally block for blc (#12561)
+* Add cudnn_off parameter to SpatialTransformer Op and fix the inconsistency between CPU & GPU code (#12557)
+* [MXNET-798] Fix the dtype cast from non float32 in Gradient computation (#12290)
+* Fix CodeCovs proper commit detection (#12551)
+* Add TensorRT tutorial to index and fix ToC (#12587)
+* Fixed typo in c_predict_api.cc (#12601)
+* Fix typo in profiler.h (#12599)
+* Fixed NoSuchMethodError for Jenkins Job for MBCC (#12618)
+* [MXNET-922] Fix memleak in profiler (#12499)
+* [MXNET-969] Fix buffer overflow in RNNOp (#12603)
+* Fixed param coercion of clojure executor/forward (#12627) (#12630)
+* Fix version dropdown behavior (#12632)
+* Fix reference to wrong function (#12644)
+* Fix the location of the tutorial of control flow operators (#12638)
+* Fix issue 12613 (#12614)
+* [MXNET-780] Fix exception handling bug (#12051)
+* Fix bug in prelu, issue 12061 (#12660)
+* [MXNET-833] [R] Char-level RNN tutorial fix (#12670)
+* Fix static / dynamic linking of gperftools and jemalloc (#12714)
+* Fix #12672, importing numpy scalars (zero-dimensional arrays) (#12678)
+* [MXNET-623] Fixing an integer overflow bug in large NDArray (#11742)
+* Fix benchmark on control flow operators (#12693)
+* Fix regression in MKLDNN caused by PR 12019 (#12740)
+* Fixed broken link for Baidu's WARP CTC (#12774)
+* Fix CNN visualization tutorial (#12719)
+* [MXNET-979] Add fix_beta support in BatchNorm (#12625)
+* R fix metric shape (#12776)
+* Revert [MXNET-979] Add fix_beta support in BatchNorm (#12625) (#12789)
+* Fix mismatch shapes (#12793)
+* Fixed symbols naming in RNNCell, LSTMCell, GRUCell (#12794)
+* Fixed __setattr__ method of _MXClassPropertyMetaClass (#12811)
+* Fixed regex for matching platform type in Scala Benchmark scripts (#12826)
+* Fix broken links (#12856)
+* Fix Flaky Topk (#12798)
+* [MXNET-1033] Fix a bug in MultiboxTarget GPU implementation (#12840)
+* [MXNET-1107] Fix CPUPinned unexpected behaviour (#12031)
+* Fix __all__ in optimizer/optimizer.py (#12886)
+* Fix Batch input issue with Scala Benchmark (#12848)
+* fix type inference in index_copy. (#12890)
+* Fix the paths issue for downloading script (#12913)
+* Fix indpt[0] for take(csr) (#12927)
+* Fix the bug of assigning large integer to NDArray (#12921)
+* Fix Sphinx errors for tutorials and install ToCs (#12945)
+* Fix variable name in tutorial code snippet (#13052)
+* Fix example for mxnet.nd.contrib.cond and fix typo in src/engine (#12954)
+* Fix a typo in operator guide (#13115)
+* Fix variational autoencoder example (#12880)
+* Fix problem with some OSX not handling the cast on imDecode (#13207)
+* [MXNET-953] Fix oob memory read (#12631)
+* Fix Sphinx error in ONNX file (#13251)
+* [Example] Fixing Gradcam implementation (#13196)
+* Fix train mnist for inception-bn and resnet (#13239)
+* Fix a bug in index_copy (#13218)
+* Fix Sphinx errors in box_nms (#13261)
+* Fix Sphinx errors (#13252)
+* Fix the cpp example compiler flag (#13293)
+* Made fixes to sparse.py and sparse.md (#13305)
+* [Example] Gradcam- Fixing a link (#13307)
+* Manually track num_max_thread (#12380)
+* [Issue #11912] throw mxnet exceptions when decoding invalid images. (#12999)
+* Undefined name: load_model() --> utils.load_model() (#12867)
+* Change the way NDArrayIter handle the last batch (#12545)
+* Add embedding to print_summary (#12796)
+* Allow foreach on input with 0 length (#12471)
+* [MXNET-360]auto convert str to bytes in img.imdecode when py3 (#10697)
+* Fix unpicklable transform_first on windows (#13686)
+
+### Licensing updates
+* Add license headers to R-package (#12559)
+* License header (#13178)
+* add url and license to clojure package project (#13304)
+
+### Improvements
+#### Tutorial
+* [MXNET-422] Distributed training tutorial (#10955)
+* Add a tutorial for control flow operators. (#12340)
+* Add tutorial Gotchas using NumPy (#12007)
+* Updated Symbol tutorial with Gluon (#12190)
+* Improve tutorial redirection (#12607)
+* Include missing import in TensorRT tutorial (#12609)
+* Update Operator Implementation Tutorial (#12230)
+* Add a tutorial for the subgraph API. (#12698)
+* Improve clojure tutorial (#12974)
+* Update scala intellij tutorial (#12827)
+* [Example] Gradcam consolidation in tutorial (#13255)
+* [MXNET-1203] Tutorial infogan (#13144)
+* [MXNET-703] Add a TensorRT walkthrough (#12548)
+
+#### Example
+* Update C++ example so it is easier to run (#12397)
+* [MXNET-580] Add SN-GAN example (#12419)
+* [MXNET-637] Multidimensional LSTM example for MXNetR (#12664)
+* [MXNET-982] Provide example to illustrate usage of CSVIter in C++ API (#12636)
+* [MXNET-947] Expand scala imclassification example with resnet (#12639)
+* MKL-DNN Quantization Examples and README (#12808)
+* Extending the DCGAN example implemented by gluon API to provide a more straight-forward evaluation on the generated image (#12790)
+* [MXNET-1017] Updating the readme file for cpp-package and adding readme file for example directory. (#12773)
+* Update tree lstm example (#12960)
+* Update bilstm integer array sorting example (#12929)
+* Updated / Deleted some examples (#12968)
+* Update module example (#12961)
+* Update adversary attack generation example (#12918)
+* Update Gluon example folder (#12951)
+* Update dec example (#12950)
+* Updated capsnet example (#12934)
+* Updates to several examples (#13068)
+* Update multi-task learning example (#12964)
+* Remove obsolete memory cost example (#13235)
+* [Example] Update cpp example README (#13280)
+* [Example]update NER example readme on module prediction (#13184)
+* Update proposal_target.py (#12709)
+* Removing the re-size for validation data, which breaking the validation accuracy of CIFAR training (#12362)
+* Update the README with instruction to redirect the user to gluon-cv (#13186)
+
+#### Documentation
+* Update ONNX API docs references (#12317)
+* Documentation update related to sparse support (#12367)
+* Edit shape.array doc and some style improvements (#12162)
+* Fixed docs/website build checkout bug (#12413)
+* Add Python API docs for test_utils and visualization (#12455)
+* Fix the installation doc for MKL-DNN backend (#12534)
+* Added comment to docs regarding ToTensor transform (#12186)
+* Pinned dockcross to a tag with fixed ABI for RPi (#12588)
+* Refine the documentation of im2rec (#12606)
+* Update and modify Windows docs (#12620)
+* update docs to list cmake required for build from source page (#12592)
+* update the distributed_training document (#12626)
+* Add docstring in im2rec.py (#12621)
+* [Doc] Change the description for pip packages (#12584)
+* Change dependencies documentation opencv2-->opencv (#12654)
+* Add documents for two new environment variables for memory pool. (#12668)
+* Scala Docs - Replace old Symbol api usages (#12759)
+* add/update infer_range docs (#12879)
+* Fix typo in formula in docstring for GRU cell and layer and add clarification to description (gluon.rnn) (#12896)
+* Fix the operator API documentation (#12942)
+* fix broken docs (#12871)
+* fix mac r install and windows python build from source docs (#12919)
+* Document the newly added env variable (#13049)
+* Add documentation on GPU performance on Quantization example (#13145)
+* Fix Sphinx python docstring formatting error. (#13177)
+* [Doc] Fix repo paths in Ubuntu build doc (#13101)
+* Fix Sphinx document parsing error. (#13195)
+* Fix #13090, Add image.imread to python API doc. (#13176)
+* Fix Sphinx docstring formatting error. (#13004, #13005, #13006) (#13175)
+* Fix #12944, Fix Sphinx python docstring formatting error. (#13174)
+* Fix #13013, Fix Sphinx python docstring error. (#13173)
+* Fixed Sparse astype doc string formatting error (#13171)
+* Fixed Documentation issues (#13215)
+* update the doc (#13205)
+* Fix Sphinx doc errors (#13170)
+* Fix Sphinx python docstring error: initializer.InitDesc (#12939) (#13148)
+* Fix Sphinx python docstring error: text contrib module (#12949) (#13149)
+* Fix Sphinx python docstrings (#13160)
+* Add Java API docs generation (#13071)
+* Fix scaladoc build errors (#13189)
+* Add missing documentations for getnnz (#13128)
+* Addressed ONNX module documentation warnings and added notes for short-form representation (#13259)
+* Doc fixes (#13256)
+* Addressed doc issues (#13165)
+* stop gap fix to let website builds through; scaladoc fix pending (#13298)
+* Fix Sphinx python docstring formatting error. (#13194)
+* Visualization doc fix. Added notes for shortform (#13291)
+* [Example] Add docstring for test optimizer and test score (#13286)
+* Fix descriptions in scaladocs for macro ndarray/symbol APIs (#13210)
+* Sphinx error reduction (#12323)
+* Sphinx errors in Gluon (#13275)
+* Update env_var.md (#12702)
+* Updated the Instructions for use of the label bot (#13192)
+* Added/changed file_name, brief description comments in some files (#13033)
+
+#### Website
+* adding apache conf promo to home page (#12347)
+* Consistent website theme and custom 404 (#12426)
+* update apachecon links to https (#12521)
+* [HOLD] 1.3.0 release website updates (#12509)
+* add mentions of the gluon toolkits and links to resources (#12667)
+* remove apachecon promo (#12695)
+* [MXNet-1002] Add GluonCV and NLP toolkits, Keras, and developer wiki to navigation (#12704)
+
+#### MXNet Distributions
+* Make the output of ci/docker/install/ubuntu_mklml.sh less verbose (#12422)
+* Fix tvm dependency for docker (#12479)
+* [MXNET-703] Add TensorRT runtime Dockerfile (#12549)
+* [MXNET-951] Python dockerfiles built on pip binaries and build/release script (#12556)
+* Change numpy version to 1.15.2 in python and docker install requirements (#12711)
+* Add mkl-dnn to docker install method (#12643)
+* Fix docker cleanup race condition (#13092)
+* Bugfix in ci/docker_cache.py (#13249)
+* Update PyPI version number (#11773)
+* update download links to apache distros (#12617)
+
+#### Installation
+* Installation instructions consolidation (#12388)
+* Refine mxnet python installation (#12696)
+* R install instructions update for macOS (#12832)
+* remove legacy installation of Roxygen2 5.0 and add R-specific clean target (#12993) (#12998)
+* Force APT cache update before executing install (#13285)
+* Make the Ubuntu scripts executable after download. (#12180)
+* replacing windows setup with newer instructions (#12504)
+* Updated download links and verification instructions (#12651)
+* Remove pip overwrites (#12604)
+
+#### Build and CI
+* [MXNET-908] Enable minimal OSX Travis build (#12462)
+* Use jom for parallel Windows builds (#12533)
+* [MXNET-950] Enable parallel R dep builds in CI (#12552)
+* Speed up CI Windows builds (#12563)
+* [MXNET-908] Speed up travis builds to avoid timeouts (#12706)
+* Simplify mac MKLDNN build (#12724)
+* [MXNET-674] Speed up GPU builds in CI (#12782)
+* Improved git reset for CI builds (#12784)
+* Improve cpp-package example project build files. (#13093)
+* Add --no-cache option to build.py when building containers (#13182)
+* Addressed sphinx build issue (#13246)
+* Tighten up PyLint directives again (#12322)
+* [MXNET-859] Add a clang-tidy stage to CI (#12282)
+* A solution to prevent zombie containers locally and in CI (#12381)
+* [MXNET-696][PYTHON][UNDEFINED NAME] import logging in ci/util.py (#12488)
+* [MXNET-703] Static linking for libprotobuf with TensorRT (#12475)
+* Remove regression checks for website links (#12507)
+* [MXNET-953] - Add ASAN sanitizer, Enable in CI (#12370)
+* Allow custom path and static linking for custom mallocs in make (#12645)
+* Correct PR branch detection in code coverage (#12615)
+* Update osx.mk - Added apple to USE_BLAS comment (#12819)
+* [MXNET-953] Correct ASAN cflags flag (#12659)
+* [MXNET-1025] Add Jetpack 3.3 support to Jetson (#12735)
+* Fail the broken link job when broken links are found (#12905)
+* Removed unused header (#13066)
+* Maven Surefire bug workaround (#13081)
+* Add Turing and Volta support to arch_name (#13168)
+* Moves f16c autodetection to its own cmake module (#12331)
+* la_op_inline.h to la_op-inl.h for consistency (#13045)
+* [MXNET-793] Virtualized ARMv7 with Qemu CI integration (#13203)
+* Remove unused variable `rotateM_` (#10803)
+* Separate refactoring from #12276 in a prior PR (#12296)
+* [MXNET-860] Remove std::moves that have no affect (#12730)
+* [MXNET-860] Use emplace where helpful (#12694)
+* Enable C++ coverage (#12642)
+* [MXNET-860] Update to modern nullptr usage (#12352)
+* [MXNET-860] Reduce redundant copies, check for regressions with clang-tidy (#12355)
+
+
+#### 3rd party
+##### TVM:
+* Updated tvm submodule head (#12764)
+* Updated tvm submodule head (#12448)
+##### CUDNN:
+* [MXNET-1179] Enforce deterministic algorithms in convolution layers (#12992)
+* CudnnFind() usage improvements (#12804)
+* Add option for automatic downcasting dtype for cudnn to allow using Tensorcore for fp32 (#12722)
+##### Horovod:
+* [MXNET-1111] Remove CPUPinned in ImageRecordIter (#12666)
+
+### Deprecations
+* contrib_CTCLoss is deprecated; a deprecation message was added (#13042)
+### Other
+* Updating news, readme files and bumping master version to 1.3.1 (#12525)
+* Add new name to CONTRIBUTORS.md (#12763)
+* Update contribute.md (#12685)
+* Updated CONTRIBUTORS.md to include lebeg and gigasquid, moved mabreu to committers section (#12766)
+* Update CONTRIBUTORS.md (#12996)
+* Updated CONTRIBUTORS.md to include mxnet-label-bot (#13048)
+
+### How to build MXNet
+Please follow the instructions at https://mxnet.incubator.apache.org/install/index.html
+
+### List of submodules used by Apache MXNet (Incubating) and when they were updated last
+Submodule@commit ID:: Last updated by MXNet:: Last update in submodule
+
+* cub@05eb57f::Jul 31, 2017 :: Jul 31, 2017
+* dlpack@10892ac:: Oct 30, 2017 :: Aug 23, 2018
+* dmlc-core@0a0e8ad:: Aug 15, 2018 :: Nov 15, 2018
+* googletest@ec44c6c:: July 14, 2016 :: July 14, 2016
+* mkldnn@a7c5f53:: Nov 7, 2018 :: Nov 5, 2018
+* mshadow@696803b:: Sep 28, 2018 :: Nov 7, 2018
+* onnx-tensorrt@3d8ee04:: Aug 22, 2018 :: Nov 10, 2018
+* openmp@37c7212:: Nov 22, 2017 :: Nov 13, 2018
+* ps-lite@8a76389:: April 25, 2018 :: Oct 9, 2018
+* tvm@0f053c8:: Oct 10, 2018 :: Oct 8, 2018
+
## 1.3.1
### Bug fixes
diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION
index c710a915bd88..70aa66e36b7e 100644
--- a/R-package/DESCRIPTION
+++ b/R-package/DESCRIPTION
@@ -29,7 +29,7 @@ Suggests:
imager,
covr
Depends:
- R (>= 3.3.0)
+ R (>= 3.4.4)
LinkingTo: Rcpp
VignetteBuilder: knitr
-RoxygenNote: 6.1.0
+RoxygenNote: 6.1.1
diff --git a/ci/Jenkinsfile_utils.groovy b/ci/Jenkinsfile_utils.groovy
index f82c238ed075..8291bae1f7b7 100644
--- a/ci/Jenkinsfile_utils.groovy
+++ b/ci/Jenkinsfile_utils.groovy
@@ -64,7 +64,7 @@ def init_git_win() {
// pack libraries for later use
def pack_lib(name, libs, include_gcov_data = false) {
- sh """
+ sh returnStatus: true, script: """
set +e
echo "Packing ${libs} into ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
@@ -83,7 +83,7 @@ return 0
def unpack_and_init(name, libs, include_gcov_data = false) {
init_git()
unstash name
- sh """
+ sh returnStatus: true, script: """
set +e
echo "Unpacked ${libs} from ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
@@ -147,8 +147,9 @@ def collect_test_results_windows(original_file_name, new_file_name) {
}
-def docker_run(platform, function_name, use_nvidia, shared_mem = '500m') {
- def command = "ci/build.py --docker-registry ${env.DOCKER_CACHE_REGISTRY} %USE_NVIDIA% --platform %PLATFORM% --docker-build-retries 3 --shm-size %SHARED_MEM% /work/runtime_functions.sh %FUNCTION_NAME%"
+def docker_run(platform, function_name, use_nvidia, shared_mem = '500m', env_vars = "") {
+ def command = "ci/build.py %ENV_VARS% --docker-registry ${env.DOCKER_CACHE_REGISTRY} %USE_NVIDIA% --platform %PLATFORM% --docker-build-retries 3 --shm-size %SHARED_MEM% /work/runtime_functions.sh %FUNCTION_NAME%"
+ command = command.replaceAll('%ENV_VARS%', env_vars.length() > 0 ? "-e ${env_vars}" : '')
command = command.replaceAll('%USE_NVIDIA%', use_nvidia ? '--nvidiadocker' : '')
command = command.replaceAll('%PLATFORM%', platform)
command = command.replaceAll('%FUNCTION_NAME%', function_name)
diff --git a/ci/build.py b/ci/build.py
index 0069392d9a2a..1c7a4f8b3231 100755
--- a/ci/build.py
+++ b/ci/build.py
@@ -92,22 +92,24 @@ def get_dockerfiles_path():
def get_platforms(path: str = get_dockerfiles_path()) -> List[str]:
"""Get a list of architectures given our dockerfiles"""
- dockerfiles = glob.glob(os.path.join(path, "Dockerfile.build.*"))
+ dockerfiles = glob.glob(os.path.join(path, "Dockerfile.*"))
dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles))
- files = list(map(lambda x: re.sub(r"Dockerfile.build.(.*)", r"\1", x), dockerfiles))
+ files = list(map(lambda x: re.sub(r"Dockerfile.(.*)", r"\1", x), dockerfiles))
platforms = list(map(lambda x: os.path.split(x)[1], sorted(files)))
return platforms
def get_docker_tag(platform: str, registry: str) -> str:
""":return: docker tag to be used for the container"""
+ platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform)
if not registry:
registry = "mxnet_local"
- return "{0}/build.{1}".format(registry, platform)
+ return "{0}/{1}".format(registry, platform)
def get_dockerfile(platform: str, path=get_dockerfiles_path()) -> str:
- return os.path.join(path, "Dockerfile.build.{0}".format(platform))
+ platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform)
+ return os.path.join(path, "Dockerfile.{0}".format(platform))
def get_docker_binary(use_nvidia_docker: bool) -> str:
@@ -215,20 +217,21 @@ def container_run(platform: str,
local_ccache_dir: str,
command: List[str],
cleanup: Cleanup,
+ environment: Dict[str, str],
dry_run: bool = False) -> int:
"""Run command in a container"""
container_wait_s = 600
#
# Environment setup
#
- environment = {
+ environment.update({
'CCACHE_MAXSIZE': '500G',
'CCACHE_TEMPDIR': '/tmp/ccache', # temp dir should be local and not shared
'CCACHE_DIR': '/work/ccache', # this path is inside the container as /work/ccache is
# mounted
'CCACHE_LOGFILE': '/tmp/ccache.log', # a container-scoped log, useful for ccache
# verification.
- }
+ })
# These variables are passed to the container to the process tree killer can find runaway
# process inside the container
# https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller
@@ -446,6 +449,10 @@ def main() -> int:
parser.add_argument("--no-cache", action="store_true",
help="passes --no-cache to docker build")
+ parser.add_argument("-e", "--environment", nargs="*", default=[],
+ help="Environment variables for the docker container. "
+ "Specify with a list containing either names or name=value")
+
parser.add_argument("command",
help="command to run in the container",
nargs='*', action='append', type=str)
@@ -474,6 +481,9 @@ def signal_handler(signum, _):
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
+ environment = dict([(e.split('=')[:2] if '=' in e else (e, os.environ[e]))
+ for e in args.environment])
+
if args.list:
print(list_platforms())
elif args.platform:
@@ -493,13 +503,13 @@ def signal_handler(signum, _):
ret = container_run(
platform=platform, nvidia_runtime=args.nvidiadocker,
shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
- local_ccache_dir=args.ccache_dir, cleanup=cleanup)
+ local_ccache_dir=args.ccache_dir, cleanup=cleanup, environment=environment)
elif args.print_docker_run:
command = []
ret = container_run(
platform=platform, nvidia_runtime=args.nvidiadocker,
shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
- local_ccache_dir=args.ccache_dir, dry_run=True, cleanup=cleanup)
+ local_ccache_dir=args.ccache_dir, dry_run=True, cleanup=cleanup, environment=environment)
else:
# With no commands, execute a build function for the target platform
command = ["/work/mxnet/ci/docker/runtime_functions.sh", "build_{}".format(platform)]
@@ -507,7 +517,7 @@ def signal_handler(signum, _):
ret = container_run(
platform=platform, nvidia_runtime=args.nvidiadocker,
shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
- local_ccache_dir=args.ccache_dir, cleanup=cleanup)
+ local_ccache_dir=args.ccache_dir, cleanup=cleanup, environment=environment)
if ret != 0:
logging.critical("Execution of %s failed with status: %d", command, ret)
@@ -515,6 +525,7 @@ def signal_handler(signum, _):
elif args.all:
platforms = get_platforms()
+ platforms = [platform for platform in platforms if 'build.' in platform]
logging.info("Building for all architectures: %s", platforms)
logging.info("Artifacts will be produced in the build/ directory.")
for platform in platforms:
@@ -535,7 +546,7 @@ def signal_handler(signum, _):
container_run(
platform=platform, nvidia_runtime=args.nvidiadocker,
shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry,
- local_ccache_dir=args.ccache_dir, cleanup=cleanup)
+ local_ccache_dir=args.ccache_dir, cleanup=cleanup, environment=environment)
shutil.move(buildir(), plat_buildir)
logging.info("Built files left in: %s", plat_buildir)
diff --git a/ci/docker/Dockerfile.build.android_armv7 b/ci/docker/Dockerfile.build.android_armv7
index c601fc5e5ff7..a2e98cd2efe1 100644
--- a/ci/docker/Dockerfile.build.android_armv7
+++ b/ci/docker/Dockerfile.build.android_armv7
@@ -75,6 +75,11 @@ ENV OpenBLAS_DIR=${CROSS_ROOT}
WORKDIR /work
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
COPY runtime_functions.sh /work/
WORKDIR /work/mxnet
diff --git a/ci/docker/Dockerfile.build.android_armv8 b/ci/docker/Dockerfile.build.android_armv8
index 60376b8efda2..f7de86763457 100644
--- a/ci/docker/Dockerfile.build.android_armv8
+++ b/ci/docker/Dockerfile.build.android_armv8
@@ -74,6 +74,12 @@ ENV CXX=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-clang++
COPY install/android_arm64_openblas.sh /work/
RUN /work/android_arm64_openblas.sh
ENV CPLUS_INCLUDE_PATH /work/deps/OpenBLAS
-WORKDIR /work/build
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
COPY runtime_functions.sh /work/
+
+WORKDIR /work/build
\ No newline at end of file
diff --git a/ci/docker/Dockerfile.build.armv6 b/ci/docker/Dockerfile.build.armv6
index 6f16d8c77a0a..60e223b7a60f 100644
--- a/ci/docker/Dockerfile.build.armv6
+++ b/ci/docker/Dockerfile.build.armv6
@@ -38,5 +38,10 @@ ENV OpenBLAS_DIR=${CROSS_ROOT}
COPY install/deb_ubuntu_ccache.sh /work/
RUN /work/deb_ubuntu_ccache.sh
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
COPY runtime_functions.sh /work/
WORKDIR /work/mxnet
diff --git a/ci/docker/Dockerfile.build.armv7 b/ci/docker/Dockerfile.build.armv7
index 5f0223448f12..0b557d5839e9 100644
--- a/ci/docker/Dockerfile.build.armv7
+++ b/ci/docker/Dockerfile.build.armv7
@@ -38,5 +38,10 @@ ENV OpenBLAS_DIR=${CROSS_ROOT}
COPY install/deb_ubuntu_ccache.sh /work/
RUN /work/deb_ubuntu_ccache.sh
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
COPY runtime_functions.sh /work/
WORKDIR /work/mxnet
diff --git a/ci/docker/Dockerfile.build.armv8 b/ci/docker/Dockerfile.build.armv8
index 27bd425ae9b7..ef9c95865590 100644
--- a/ci/docker/Dockerfile.build.armv8
+++ b/ci/docker/Dockerfile.build.armv8
@@ -42,5 +42,10 @@ ENV OpenBLAS_DIR=${CROSS_ROOT}
COPY install/deb_ubuntu_ccache.sh /work/
RUN /work/deb_ubuntu_ccache.sh
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
COPY runtime_functions.sh /work/
WORKDIR /work/build
diff --git a/ci/docker/Dockerfile.build.jetson b/ci/docker/Dockerfile.build.jetson
index d128ebc7e2a7..07097887f87d 100644
--- a/ci/docker/Dockerfile.build.jetson
+++ b/ci/docker/Dockerfile.build.jetson
@@ -77,10 +77,16 @@ RUN JETPACK_DOWNLOAD_PREFIX=https://developer.download.nvidia.com/devzone/devcen
dpkg -i --force-architecture $ARM_NVINFER_INSTALLER_PACKAGE && \
dpkg -i --force-architecture $ARM_NVINFER_DEV_INSTALLER_PACKAGE && \
apt update -y || true && apt install -y cuda-libraries-dev-9-0 libcudnn7-dev libnvinfer-dev
+RUN ln -s /usr/include/aarch64-linux-gnu/cudnn_v7.h /usr/include/aarch64-linux-gnu/cudnn.h
ENV PATH $PATH:/usr/local/cuda/bin
ENV NVCCFLAGS "-m64"
ENV CUDA_ARCH "-gencode arch=compute_53,code=sm_53 -gencode arch=compute_62,code=sm_62"
ENV NVCC /usr/local/cuda/bin/nvcc
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
COPY runtime_functions.sh /work/
WORKDIR /work/mxnet
diff --git a/ci/docker/Dockerfile.publish.test.centos7_cpu b/ci/docker/Dockerfile.publish.test.centos7_cpu
new file mode 100644
index 000000000000..7d284452971b
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.centos7_cpu
@@ -0,0 +1,38 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to build and run MXNet on CentOS 7 for CPU
+
+FROM centos:7
+
+WORKDIR /work/deps
+
+COPY install/centos7_base.sh /work/
+RUN /work/centos7_base.sh
+
+COPY install/centos7_scala.sh /work/
+RUN /work/centos7_scala.sh
+
+ARG USER_ID=0
+COPY install/centos7_adduser.sh /work/
+RUN /work/centos7_adduser.sh
+
+ENV PYTHONPATH=./python/
+WORKDIR /work/mxnet
+
+COPY runtime_functions.sh /work/
diff --git a/ci/docker/Dockerfile.publish.test.centos7_gpu b/ci/docker/Dockerfile.publish.test.centos7_gpu
new file mode 100644
index 000000000000..e7f584683109
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.centos7_gpu
@@ -0,0 +1,38 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to build and run MXNet on CentOS 7 for GPU
+
+FROM nvidia/cuda:9.2-cudnn7-devel-centos7
+
+WORKDIR /work/deps
+
+COPY install/centos7_base.sh /work/
+RUN /work/centos7_base.sh
+
+COPY install/centos7_scala.sh /work/
+RUN /work/centos7_scala.sh
+
+ARG USER_ID=0
+COPY install/centos7_adduser.sh /work/
+RUN /work/centos7_adduser.sh
+
+ENV PYTHONPATH=./python/
+WORKDIR /work/mxnet
+
+COPY runtime_functions.sh /work/
diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1404_cpu b/ci/docker/Dockerfile.publish.test.ubuntu1404_cpu
new file mode 100644
index 000000000000..035837686554
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.ubuntu1404_cpu
@@ -0,0 +1,39 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to build and run MXNet on Ubuntu 14.04 for CPU
+
+FROM ubuntu:14.04
+
+WORKDIR /work/deps
+
+COPY install/ubuntu_base.sh /work/
+RUN /work/ubuntu_base.sh
+
+COPY install/ubuntu_scala.sh /work/
+RUN /work/ubuntu_scala.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1404_gpu b/ci/docker/Dockerfile.publish.test.ubuntu1404_gpu
new file mode 100644
index 000000000000..854dd68a63c1
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.ubuntu1404_gpu
@@ -0,0 +1,40 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to run MXNet on Ubuntu 14.04 for GPU
+
+# Use CPU with setup_gpu script
+FROM ubuntu:14.04
+
+WORKDIR /work/deps
+
+COPY install/ubuntu_base.sh /work/
+RUN /work/ubuntu_base.sh
+
+COPY install/ubuntu_scala.sh /work/
+RUN /work/ubuntu_scala.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1604_cpu b/ci/docker/Dockerfile.publish.test.ubuntu1604_cpu
new file mode 100644
index 000000000000..bbb7b6a0d7bd
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.ubuntu1604_cpu
@@ -0,0 +1,39 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU
+
+FROM ubuntu:16.04
+
+WORKDIR /work/deps
+
+COPY install/ubuntu_base.sh /work/
+RUN /work/ubuntu_base.sh
+
+COPY install/ubuntu_scala.sh /work/
+RUN /work/ubuntu_scala.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1604_gpu b/ci/docker/Dockerfile.publish.test.ubuntu1604_gpu
new file mode 100644
index 000000000000..660461dc0cfa
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.ubuntu1604_gpu
@@ -0,0 +1,39 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to run MXNet on Ubuntu 16.04 for GPU
+
+FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu16.04
+
+WORKDIR /work/deps
+
+COPY install/ubuntu_base.sh /work/
+RUN /work/ubuntu_base.sh
+
+COPY install/ubuntu_scala.sh /work/
+RUN /work/ubuntu_scala.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1804_cpu b/ci/docker/Dockerfile.publish.test.ubuntu1804_cpu
new file mode 100644
index 000000000000..e3a8c193f234
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.ubuntu1804_cpu
@@ -0,0 +1,41 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU
+
+FROM ubuntu:18.04
+
+WORKDIR /work/deps
+
+ENV DEBIAN_FRONTEND noninteractive
+
+COPY install/ubuntu_base.sh /work/
+RUN /work/ubuntu_base.sh
+
+COPY install/ubuntu_scala.sh /work/
+RUN /work/ubuntu_scala.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1804_gpu b/ci/docker/Dockerfile.publish.test.ubuntu1804_gpu
new file mode 100644
index 000000000000..99f7e0d3eff9
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.test.ubuntu1804_gpu
@@ -0,0 +1,41 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to run MXNet on Ubuntu 18.04 for GPU
+
+FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu18.04
+
+WORKDIR /work/deps
+
+ENV DEBIAN_FRONTEND noninteractive
+
+COPY install/ubuntu_base.sh /work/
+RUN /work/ubuntu_base.sh
+
+COPY install/ubuntu_scala.sh /work/
+RUN /work/ubuntu_scala.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.ubuntu1404_cpu b/ci/docker/Dockerfile.publish.ubuntu1404_cpu
new file mode 100644
index 000000000000..04ce94f95eae
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.ubuntu1404_cpu
@@ -0,0 +1,36 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to build and run MXNet on Ubuntu 14.04 for CPU
+
+FROM ubuntu:14.04
+
+WORKDIR /work/deps
+
+COPY install/ubuntu_publish.sh /work/
+RUN /work/ubuntu_publish.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/Dockerfile.publish.ubuntu1404_gpu b/ci/docker/Dockerfile.publish.ubuntu1404_gpu
new file mode 100644
index 000000000000..9855986a2891
--- /dev/null
+++ b/ci/docker/Dockerfile.publish.ubuntu1404_gpu
@@ -0,0 +1,36 @@
+# -*- mode: dockerfile -*-
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Dockerfile to run MXNet on Ubuntu 14.04 for GPU
+
+FROM ubuntu:14.04
+
+WORKDIR /work/deps
+
+COPY install/ubuntu_publish.sh /work/
+RUN /work/ubuntu_publish.sh
+
+ARG USER_ID=0
+ARG GROUP_ID=0
+COPY install/ubuntu_adduser.sh /work/
+RUN /work/ubuntu_adduser.sh
+
+COPY runtime_functions.sh /work/
+
+WORKDIR /work/mxnet
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
diff --git a/ci/docker/install/centos7_adduser.sh b/ci/docker/install/centos7_adduser.sh
index ba72c9b92281..f9d2402c9554 100755
--- a/ci/docker/install/centos7_adduser.sh
+++ b/ci/docker/install/centos7_adduser.sh
@@ -34,4 +34,9 @@ then
mkdir /work/mxnet
mkdir /work/build
chown -R jenkins_slave /work/
+
+ # Later on, we have to override the links because underlying build systems ignore our compiler settings. Thus,
+ # we have to give the process the proper permission to these files. This is hacky, but unfortunately
+ # there's no better way to do this without patching all our submodules.
+ chown -R jenkins_slave /usr/local/bin
fi
diff --git a/ci/docker/install/centos7_base.sh b/ci/docker/install/centos7_base.sh
new file mode 100755
index 000000000000..3b84aeb57b06
--- /dev/null
+++ b/ci/docker/install/centos7_base.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# build and install are separated so changes to build don't invalidate
+# the whole docker cache for the image
+
+set -ex
+
+# Multipackage installation does not fail in yum
+yum -y install epel-release
+yum -y install git
+yum -y install wget
+yum -y install make
+yum -y install cmake
+yum -y install unzip
+yum -y install ninja-build
+yum -y install gcc-gfortran
diff --git a/ci/docker/install/centos7_scala.sh b/ci/docker/install/centos7_scala.sh
index ea46de9b9311..5c43f011cbf1 100755
--- a/ci/docker/install/centos7_scala.sh
+++ b/ci/docker/install/centos7_scala.sh
@@ -23,9 +23,17 @@
set -ex
yum install -y java-1.8.0-openjdk-devel
+export JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk
+export PATH=$JAVA_HOME/bin:$PATH
# Build from source with Maven
-wget http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
+wget -q http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
tar xzf apache-maven-3.3.9-bin.tar.gz
mkdir /usr/local/maven
mv apache-maven-3.3.9/ /usr/local/maven/
alternatives --install /usr/bin/mvn mvn /usr/local/maven/apache-maven-3.3.9/bin/mvn 1
+
+echo "export JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk" >> /etc/profile.d/maven.sh
+echo "export M3_HOME=/usr/local/src/apache-maven" >> /etc/profile.d/maven.sh
+echo "export PATH=$M3_HOME/bin:$JAVA_HOME/bin:$PATH" >> /etc/profile.d/maven.sh
+chmod +x /etc/profile.d/maven.sh
+source /etc/profile.d/maven.sh
diff --git a/ci/docker/install/ubuntu_adduser.sh b/ci/docker/install/ubuntu_adduser.sh
index 515a80f63b07..a7668bac2ab6 100755
--- a/ci/docker/install/ubuntu_adduser.sh
+++ b/ci/docker/install/ubuntu_adduser.sh
@@ -40,4 +40,9 @@ then
mkdir /work/mxnet
mkdir /work/build
chown -R jenkins_slave /work/
+
+ # Later on, we have to override the links because underlying build systems ignore our compiler settings. Thus,
+ # we have to give the process the proper permission to these files. This is hacky, but unfortunately
+ # there's no better way to do this without patching all our submodules.
+ chown -R jenkins_slave /usr/local/bin
fi
diff --git a/ci/docker/install/ubuntu_base.sh b/ci/docker/install/ubuntu_base.sh
new file mode 100755
index 000000000000..b34c0b3e18f1
--- /dev/null
+++ b/ci/docker/install/ubuntu_base.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# build and install are separated so changes to build don't invalidate
+# the whole docker cache for the image
+
+set -ex
+apt-get update || true
+apt-get install -y \
+ build-essential \
+ ca-certificates \
+ cmake \
+ curl \
+ git \
+ ninja-build \
+ libgfortran3 \
+ software-properties-common \
+ sudo \
+ unzip \
+ wget
diff --git a/ci/docker/install/ubuntu_core.sh b/ci/docker/install/ubuntu_core.sh
index 64f8af3e0444..4382aa6aefd0 100755
--- a/ci/docker/install/ubuntu_core.sh
+++ b/ci/docker/install/ubuntu_core.sh
@@ -26,7 +26,6 @@ apt-get install -y \
apt-transport-https \
build-essential \
ca-certificates \
- cmake \
curl \
git \
libatlas-base-dev \
@@ -41,3 +40,11 @@ apt-get install -y \
sudo \
unzip \
wget
+
+
+# Ubuntu 14.04
+if [[ $(lsb_release -r | grep 14.04) ]]; then
+ apt-get install -y cmake3
+else
+ apt-get install -y cmake
+fi
diff --git a/ci/docker/install/ubuntu_nvidia.sh b/ci/docker/install/ubuntu_nvidia.sh
index 3d8de9d0d7dd..7012b897ff91 100755
--- a/ci/docker/install/ubuntu_nvidia.sh
+++ b/ci/docker/install/ubuntu_nvidia.sh
@@ -18,11 +18,6 @@
# under the License.
set -ex
-apt-get update || true
-apt install -y software-properties-common
-
-# Adding ppas frequently fails due to busy gpg servers, retry 5 times with 5 minute delays.
-for i in 1 2 3 4 5; do add-apt-repository -y ppa:graphics-drivers && break || sleep 300; done
# Retrieve ppa:graphics-drivers and install nvidia-drivers.
# Note: DEBIAN_FRONTEND required to skip the interactive setup steps
diff --git a/ci/docker/install/ubuntu_publish.sh b/ci/docker/install/ubuntu_publish.sh
new file mode 100644
index 000000000000..1ad6ab947842
--- /dev/null
+++ b/ci/docker/install/ubuntu_publish.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Build on Ubuntu 14.04 LTS for LINUX CPU/GPU
+apt-get update
+apt-get install -y software-properties-common
+add-apt-repository ppa:ubuntu-toolchain-r/test -y
+add-apt-repository ppa:openjdk-r/ppa -y # Java lib
+apt-get update
+apt-get install -y git \
+ cmake3 \
+ libcurl4-openssl-dev \
+ unzip \
+ gcc-4.8 \
+ g++-4.8 \
+ gfortran \
+ gfortran-4.8 \
+ binutils \
+ nasm \
+ libtool \
+ curl \
+ wget \
+ sudo \
+ gnupg \
+ gnupg2 \
+ gnupg-agent \
+ pandoc \
+ python3-pip \
+ automake \
+ pkg-config \
+ openjdk-8-jdk
+curl -o apache-maven-3.3.9-bin.tar.gz http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
+tar xzf apache-maven-3.3.9-bin.tar.gz
+mkdir /usr/local/maven
+mv apache-maven-3.3.9/ /usr/local/maven/
+update-alternatives --install /usr/bin/mvn mvn /usr/local/maven/apache-maven-3.3.9/bin/mvn 1
+update-ca-certificates -f
+
+apt-get install -y python python3
+
+# the version of pip shipped with Ubuntu may be too old, so install a recent version here
+wget -nv https://bootstrap.pypa.io/get-pip.py
+python3 get-pip.py
+python2 get-pip.py
+
+apt-get remove -y python3-urllib3
+
+pip2 install nose cpplint==1.3.0 pylint==1.9.3 'numpy<=1.15.2,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3
+pip3 install nose cpplint==1.3.0 pylint==2.1.1 'numpy<=1.15.2,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3
diff --git a/ci/docker/install/ubuntu_scala.sh b/ci/docker/install/ubuntu_scala.sh
index 6ecb8d801186..5bade47463e2 100755
--- a/ci/docker/install/ubuntu_scala.sh
+++ b/ci/docker/install/ubuntu_scala.sh
@@ -24,13 +24,31 @@ set -ex
cd "$(dirname "$0")"
# install libraries for mxnet's scala package on ubuntu
echo 'Installing Scala...'
-apt-get update || true
-apt-get install -y software-properties-common
-apt-get update || true
-apt-get install -y openjdk-8-jdk
-apt-get install -y openjdk-8-jre
+# Ubuntu 14.04
+if [[ $(lsb_release -r | grep 14.04) ]]; then
+ add-apt-repository -y ppa:openjdk-r/ppa
+fi
+
+# All Ubuntu
apt-get update || true
apt-get install -y \
- maven \
+ openjdk-8-jdk \
+ openjdk-8-jre \
+ software-properties-common \
+ gnupg \
+ gnupg2 \
+ gnupg-agent \
scala
+
+# Ubuntu 14.04
+if [[ $(lsb_release -r | grep 14.04) ]]; then
+ curl -o apache-maven-3.3.9-bin.tar.gz http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
+ tar xzf apache-maven-3.3.9-bin.tar.gz
+ mkdir /usr/local/maven
+ mv apache-maven-3.3.9/ /usr/local/maven/
+ update-alternatives --install /usr/bin/mvn mvn /usr/local/maven/apache-maven-3.3.9/bin/mvn 1
+ update-ca-certificates -f
+else
+ apt-get install -y maven
+fi
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 82e6feb2a728..a6bb1064a589 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -36,35 +36,67 @@ clean_repo() {
git submodule update --init --recursive
}
+scala_prepare() {
+ # Clean up maven logs
+ export MAVEN_OPTS="-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn"
+}
+
build_ccache_wrappers() {
set -ex
- rm -f cc
- rm -f cxx
-
- touch cc
- touch cxx
-
if [ -z ${CC+x} ]; then
echo "No \$CC set, defaulting to gcc";
export CC=gcc
fi
-
- if [ -z ${CXX+x} ]; then
+ if [ -z ${CXX+x} ]; then
echo "No \$CXX set, defaulting to g++";
export CXX=g++
fi
- # this function is nessesary for cuda enabled make based builds, since nvcc needs just an executable for -ccbin
-
- echo -e "#!/bin/sh\n/usr/local/bin/ccache ${CC} \"\$@\"\n" >> cc
- echo -e "#!/bin/sh\n/usr/local/bin/ccache ${CXX} \"\$@\"\n" >> cxx
-
- chmod +x cc
- chmod +x cxx
-
- export CC=`pwd`/cc
- export CXX=`pwd`/cxx
+ # Recommended by CCache: https://ccache.samba.org/manual.html#_run_modes
+ # Add to the beginning of path to ensure this redirection is picked up instead
+ # of the original ones. Especially CUDA/NVCC appends itself to the beginning of the
+ # path and thus this redirect is ignored. This change fixes this problem
+ # This hacky approach with symbolic links is required because underlying build
+ # systems of our submodules ignore our CMake settings. If they use Makefile,
+ # we can't influence them at all in general and NVCC also prefers to hardcode their
+ # compiler instead of respecting the settings. Thus, we take this brutal approach
+    # and just redirect everything once this installer has been called.
+ # In future, we could do these links during image build time of the container.
+ # But in the beginning, we'll make this opt-in. In future, loads of processes like
+ # the scala make step or numpy compilation and other pip package generations
+ # could be heavily sped up by using ccache as well.
+ mkdir /tmp/ccache-redirects
+ export PATH=/tmp/ccache-redirects:$PATH
+ ln -s ccache /tmp/ccache-redirects/gcc
+ ln -s ccache /tmp/ccache-redirects/gcc-8
+ ln -s ccache /tmp/ccache-redirects/g++
+ ln -s ccache /tmp/ccache-redirects/g++-8
+ ln -s ccache /tmp/ccache-redirects/nvcc
+ ln -s ccache /tmp/ccache-redirects/clang++-3.9
+ ln -s ccache /tmp/ccache-redirects/clang-3.9
+ ln -s ccache /tmp/ccache-redirects/clang++-5.0
+ ln -s ccache /tmp/ccache-redirects/clang-5.0
+ ln -s ccache /tmp/ccache-redirects/clang++-6.0
+ ln -s ccache /tmp/ccache-redirects/clang-6.0
+ ln -s ccache /usr/local/bin/gcc
+ ln -s ccache /usr/local/bin/gcc-8
+ ln -s ccache /usr/local/bin/g++
+ ln -s ccache /usr/local/bin/g++-8
+ ln -s ccache /usr/local/bin/nvcc
+ ln -s ccache /usr/local/bin/clang++-3.9
+ ln -s ccache /usr/local/bin/clang-3.9
+ ln -s ccache /usr/local/bin/clang++-5.0
+ ln -s ccache /usr/local/bin/clang-5.0
+ ln -s ccache /usr/local/bin/clang++-6.0
+ ln -s ccache /usr/local/bin/clang-6.0
+
+ export NVCC=ccache
+
+ # Uncomment if you would like to debug CCache hit rates.
+ # You can monitor using tail -f ccache-log
+ # export CCACHE_LOGFILE=/work/mxnet/ccache-log
+ # export CCACHE_DEBUG=1
}
build_wheel() {
@@ -106,6 +138,8 @@ build_jetson() {
set -ex
pushd .
+ #build_ccache_wrappers
+
cp make/crosscompile.jetson.mk ./config.mk
make -j$(nproc)
@@ -129,6 +163,7 @@ build_armv6() {
# We do not need OpenMP, since most armv6 systems have only 1 core
+ build_ccache_wrappers
cmake \
-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
@@ -159,6 +194,7 @@ build_armv7() {
# file tries to add -llapack. Lapack functionality though, requires -lgfortran
# to be linked additionally.
+ build_ccache_wrappers
cmake \
-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} \
-DCMAKE_CROSSCOMPILING=ON \
@@ -181,6 +217,7 @@ build_armv7() {
}
build_armv8() {
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
@@ -205,6 +242,7 @@ build_armv8() {
build_android_armv7() {
set -ex
cd /work/build
+ build_ccache_wrappers
cmake \
-DANDROID=ON\
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
@@ -225,6 +263,7 @@ build_android_armv7() {
build_android_armv8() {
set -ex
cd /work/build
+ build_ccache_wrappers
cmake\
-DANDROID=ON \
-DUSE_CUDA=OFF\
@@ -244,19 +283,21 @@ build_centos7_cpu() {
cd /work/mxnet
export CC="ccache gcc"
export CXX="ccache g++"
-
+ build_ccache_wrappers
make \
DEV=1 \
USE_LAPACK=1 \
ENABLE_TESTCOVERAGE=1 \
USE_LAPACK_PATH=/usr/lib64/liblapack.so \
USE_BLAS=openblas \
+ USE_MKLDNN=0 \
USE_DIST_KVSTORE=1 \
-j$(nproc)
}
build_amzn_linux_cpu() {
cd /work/build
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
@@ -273,19 +314,17 @@ build_amzn_linux_cpu() {
ninja -v
}
-
build_centos7_mkldnn() {
set -ex
cd /work/mxnet
export CC="ccache gcc"
export CXX="ccache g++"
-
+ build_ccache_wrappers
make \
DEV=1 \
ENABLE_TESTCOVERAGE=1 \
USE_LAPACK=1 \
USE_LAPACK_PATH=/usr/lib64/liblapack.so \
- USE_MKLDNN=1 \
USE_BLAS=openblas \
-j$(nproc)
}
@@ -294,13 +333,14 @@ build_centos7_gpu() {
set -ex
cd /work/mxnet
# unfortunately this build has problems in 3rdparty dependencies with ccache and make
- # build_ccache_wrappers
+ build_ccache_wrappers
make \
DEV=1 \
ENABLE_TESTCOVERAGE=1 \
USE_LAPACK=1 \
USE_LAPACK_PATH=/usr/lib64/liblapack.so \
USE_BLAS=openblas \
+ USE_MKLDNN=0 \
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
@@ -315,13 +355,15 @@ build_ubuntu_cpu() {
build_ubuntu_cpu_openblas() {
set -ex
- export CC="ccache gcc"
- export CXX="ccache g++"
+ export CC="gcc"
+ export CXX="g++"
+ build_ccache_wrappers
make \
DEV=1 \
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
+ USE_MKLDNN=0 \
USE_DIST_KVSTORE=1 \
-j$(nproc)
}
@@ -335,6 +377,7 @@ build_ubuntu_cpu_mkl() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=mkl \
+ USE_MKLDNN=0 \
USE_INTEL_PATH=/opt/intel \
USE_DIST_KVSTORE=1 \
-j$(nproc)
@@ -344,6 +387,7 @@ build_ubuntu_cpu_cmake_debug() {
set -ex
pushd .
cd /work/build
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
@@ -352,6 +396,7 @@ build_ubuntu_cpu_cmake_debug() {
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_OPENMP=OFF \
-DUSE_OPENCV=ON \
+ -DUSE_SIGNAL_HANDLER=ON \
-DCMAKE_BUILD_TYPE=Debug \
-G Ninja \
/work/mxnet
@@ -365,13 +410,15 @@ build_ubuntu_cpu_cmake_asan() {
pushd .
cd /work/build
+ export CXX=g++-8
+ export CC=gcc-8
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CXX_COMPILER=/usr/bin/g++-8 \
- -DCMAKE_C_COMPILER=/usr/bin/gcc-8 \
-DUSE_CUDA=OFF \
-DUSE_MKL_IF_AVAILABLE=OFF \
+ -DUSE_MKLDNN=OFF \
-DUSE_OPENMP=OFF \
-DUSE_OPENCV=OFF \
-DCMAKE_BUILD_TYPE=Debug \
@@ -391,13 +438,14 @@ build_ubuntu_cpu_cmake_asan() {
build_ubuntu_cpu_clang39() {
set -ex
- export CXX=clang++-3.9
+ export CXX=clang++-3.9
export CC=clang-3.9
- build_ccache_wrappers
- make \
+ build_ccache_wrappers
+ make \
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
+ USE_MKLDNN=0 \
USE_OPENMP=0 \
USE_DIST_KVSTORE=1 \
-j$(nproc)
@@ -415,6 +463,7 @@ build_ubuntu_cpu_clang60() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
+ USE_MKLDNN=0 \
USE_OPENMP=1 \
USE_DIST_KVSTORE=1 \
-j$(nproc)
@@ -429,10 +478,12 @@ build_ubuntu_cpu_clang_tidy() {
pushd .
cd /work/build
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
-DUSE_CUDA=OFF \
+ -DUSE_MKLDNN=OFF \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_OPENCV=ON \
-DCMAKE_BUILD_TYPE=Debug \
@@ -458,7 +509,6 @@ build_ubuntu_cpu_clang39_mkldnn() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
- USE_MKLDNN=1 \
USE_OPENMP=0 \
-j$(nproc)
}
@@ -475,7 +525,6 @@ build_ubuntu_cpu_clang60_mkldnn() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
- USE_MKLDNN=1 \
USE_OPENMP=1 \
-j$(nproc)
}
@@ -490,7 +539,6 @@ build_ubuntu_cpu_mkldnn() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
- USE_MKLDNN=1 \
-j$(nproc)
}
@@ -504,7 +552,6 @@ build_ubuntu_cpu_mkldnn_mkl() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=mkl \
- USE_MKLDNN=1 \
-j$(nproc)
}
@@ -526,6 +573,8 @@ build_ubuntu_gpu_tensorrt() {
mkdir -p build
cd build
cmake \
+ -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
+ -DCMAKE_C_COMPILER_LAUNCHER=ccache \
-DCMAKE_CXX_FLAGS=-I/usr/include/python${PYVER}\
-DBUILD_SHARED_LIBS=ON ..\
-G Ninja
@@ -540,7 +589,10 @@ build_ubuntu_gpu_tensorrt() {
cd 3rdparty/onnx-tensorrt/
mkdir -p build
cd build
- cmake ..
+ cmake \
+ -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
+ -DCMAKE_C_COMPILER_LAUNCHER=ccache \
+ ..
make -j$(nproc)
export LIBRARY_PATH=`pwd`:$LIBRARY_PATH
popd
@@ -559,6 +611,7 @@ build_ubuntu_gpu_tensorrt() {
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_OPENCV=0 \
+ USE_MKLDNN=0 \
USE_DIST_KVSTORE=0 \
USE_TENSORRT=1 \
USE_JEMALLOC=0 \
@@ -578,7 +631,6 @@ build_ubuntu_gpu_mkldnn() {
ENABLE_TESTCOVERAGE=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
- USE_MKLDNN=1 \
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
@@ -595,7 +647,6 @@ build_ubuntu_gpu_mkldnn_nocudnn() {
DEV=1 \
ENABLE_TESTCOVERAGE=1 \
USE_BLAS=openblas \
- USE_MKLDNN=1 \
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=0 \
@@ -611,6 +662,7 @@ build_ubuntu_gpu_cuda91_cudnn7() {
DEV=1 \
ENABLE_TESTCOVERAGE=1 \
USE_BLAS=openblas \
+ USE_MKLDNN=0 \
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
@@ -623,6 +675,7 @@ build_ubuntu_gpu_cuda91_cudnn7() {
build_ubuntu_amalgamation() {
set -ex
# Amalgamation can not be run with -j nproc
+ build_ccache_wrappers
make -C amalgamation/ clean
make -C amalgamation/ \
USE_BLAS=openblas \
@@ -632,6 +685,7 @@ build_ubuntu_amalgamation() {
build_ubuntu_amalgamation_min() {
set -ex
# Amalgamation can not be run with -j nproc
+ build_ccache_wrappers
make -C amalgamation/ clean
make -C amalgamation/ \
USE_BLAS=openblas \
@@ -642,14 +696,16 @@ build_ubuntu_amalgamation_min() {
build_ubuntu_gpu_cmake_mkldnn() {
set -ex
cd /work/build
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
+ -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \
+ -DUSE_SIGNAL_HANDLER=ON \
-DENABLE_TESTCOVERAGE=ON \
-DUSE_CUDA=1 \
-DUSE_CUDNN=1 \
-DUSE_MKLML_MKL=1 \
- -DUSE_MKLDNN=1 \
-DCMAKE_BUILD_TYPE=Release \
-DCUDA_ARCH_NAME=Manual \
-DCUDA_ARCH_BIN=$CI_CMAKE_CUDA_ARCH_BIN \
@@ -657,20 +713,27 @@ build_ubuntu_gpu_cmake_mkldnn() {
/work/mxnet
ninja -v
+    # libmkldnn.so.0 is a symlink; replace it with an actual file of the same name.
+ cp 3rdparty/mkldnn/src/libmkldnn.so.0 3rdparty/mkldnn/src/libmkldnn.so.0.tmp
+ mv 3rdparty/mkldnn/src/libmkldnn.so.0.tmp 3rdparty/mkldnn/src/libmkldnn.so.0
}
build_ubuntu_gpu_cmake() {
set -ex
cd /work/build
+ build_ccache_wrappers
cmake \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
+ -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \
+ -DUSE_SIGNAL_HANDLER=ON \
-DENABLE_TESTCOVERAGE=ON \
- -DUSE_CUDA=1 \
- -DUSE_CUDNN=1 \
- -DUSE_MKLML_MKL=0 \
- -DUSE_MKLDNN=0 \
- -DUSE_DIST_KVSTORE=1 \
+ -DUSE_CUDA=ON \
+ -DUSE_CUDNN=ON \
+ -DUSE_MKL_IF_AVAILABLE=OFF \
+ -DUSE_MKLML_MKL=OFF \
+ -DUSE_MKLDNN=OFF \
+ -DUSE_DIST_KVSTORE=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCUDA_ARCH_NAME=Manual \
-DCUDA_ARCH_BIN=$CI_CMAKE_CUDA_ARCH_BIN \
@@ -783,21 +846,25 @@ unittest_ubuntu_python3_quantization_gpu() {
unittest_ubuntu_cpu_scala() {
set -ex
- make scalapkg USE_BLAS=openblas USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
- make scalaunittest USE_BLAS=openblas USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
+ scala_prepare
+ cd scala-package
+ mvn -B integration-test
}
unittest_centos7_cpu_scala() {
set -ex
cd /work/mxnet
- make scalapkg USE_BLAS=openblas USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
- make scalaunittest USE_BLAS=openblas USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
+ scala_prepare
+ cd scala-package
+ mvn -B integration-test
}
unittest_ubuntu_cpu_clojure() {
set -ex
- make scalapkg USE_OPENCV=1 USE_BLAS=openblas USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
- make scalainstall USE_OPENCV=1 USE_BLAS=openblas USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
+ scala_prepare
+ cd scala-package
+ mvn -B install
+ cd ..
./contrib/clojure-package/ci-test.sh
}
@@ -806,7 +873,7 @@ unittest_ubuntu_cpugpu_perl() {
./perl-package/test.sh
}
-unittest_ubuntu_gpu_cpp() {
+unittest_cpp() {
set -ex
build/tests/mxnet_unit_tests
}
@@ -858,6 +925,7 @@ unittest_ubuntu_cpu_julia06() {
# FIXME
export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so'
+ export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH
# use the prebuilt binary from $MXNET_HOME/lib
julia -e 'Pkg.build("MXNet")'
@@ -886,11 +954,10 @@ unittest_centos7_gpu() {
integrationtest_ubuntu_cpu_onnx() {
set -ex
export PYTHONPATH=./python/
- pytest tests/python-pytest/onnx/import/mxnet_backend_test.py
- pytest tests/python-pytest/onnx/import/onnx_import_test.py
- pytest tests/python-pytest/onnx/import/gluon_backend_test.py
- pytest tests/python-pytest/onnx/export/onnx_backend_test.py
- python tests/python-pytest/onnx/export/mxnet_export_test.py
+ python tests/python-pytest/onnx/backend_test.py
+ pytest tests/python-pytest/onnx/mxnet_export_test.py
+ pytest tests/python-pytest/onnx/test_models.py
+ pytest tests/python-pytest/onnx/test_node.py
}
integrationtest_ubuntu_gpu_python() {
@@ -941,8 +1008,10 @@ integrationtest_ubuntu_cpu_dist_kvstore() {
integrationtest_ubuntu_gpu_scala() {
set -ex
- make scalapkg USE_OPENCV=1 USE_BLAS=openblas USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1 USE_DIST_KVSTORE=1 SCALA_ON_GPU=1 ENABLE_TESTCOVERAGE=1
- make scalaintegrationtest USE_OPENCV=1 USE_BLAS=openblas USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1 SCALA_TEST_ON_GPU=1 USE_DIST_KVSTORE=1 ENABLE_TESTCOVERAGE=1
+ scala_prepare
+ cd scala-package
+ export SCALA_TEST_ON_GPU=1
+ mvn -B integration-test -DskipTests=false
}
integrationtest_ubuntu_gpu_dist_kvstore() {
@@ -1006,7 +1075,6 @@ build_docs() {
popd
}
-
# Functions that run the nightly Tests:
#Runs Apache RAT Check on MXNet Source for License Headers
@@ -1135,7 +1203,7 @@ nightly_straight_dope_python3_multi_gpu_tests() {
nightly_tutorial_test_ubuntu_python3_gpu() {
set -ex
cd /work/mxnet/docs
- export BUILD_VER=tutorial
+ export BUILD_VER=tutorial
export MXNET_DOCS_BUILD_MXNET=0
make html
export MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0
@@ -1158,6 +1226,14 @@ nightly_tutorial_test_ubuntu_python2_gpu() {
nosetests-3.4 $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_tutorials.xml test_tutorials.py --nologcapture
}
+nightly_java_demo_test_cpu() {
+ set -ex
+ cd /work/mxnet/scala-package/mxnet-demo/java-demo
+ make javademo
+ ./bin/java_sample.sh
+ ./bin/run_od.sh
+}
+
# Deploy
@@ -1165,7 +1241,7 @@ deploy_docs() {
set -ex
pushd .
- make docs
+ make docs SPHINXOPTS=-W
popd
}
@@ -1184,6 +1260,7 @@ deploy_jl_docs() {
# FIXME
export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so'
+ export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH
# use the prebuilt binary from $MXNET_HOME/lib
julia -e 'Pkg.build("MXNet")'
@@ -1195,6 +1272,30 @@ deploy_jl_docs() {
# ...
}
+publish_scala_build() {
+ set -ex
+ pushd .
+ scala_prepare
+ ./ci/publish/scala/build.sh
+ popd
+}
+
+publish_scala_test() {
+ set -ex
+ pushd .
+ scala_prepare
+ ./ci/publish/scala/test.sh
+ popd
+}
+
+publish_scala_deploy() {
+ set -ex
+ pushd .
+ scala_prepare
+ ./ci/publish/scala/deploy.sh
+ popd
+}
+
# broken_link_checker
broken_link_checker() {
@@ -1221,5 +1322,3 @@ EOF
declare -F | cut -d' ' -f3
echo
fi
-
-
diff --git a/ci/docker_cache.py b/ci/docker_cache.py
index fe1882a567aa..f906b0eba66c 100755
--- a/ci/docker_cache.py
+++ b/ci/docker_cache.py
@@ -30,12 +30,16 @@
import sys
import subprocess
import json
-import time
from typing import *
import build as build_util
+from util import retry
DOCKERHUB_LOGIN_NUM_RETRIES = 5
DOCKERHUB_RETRY_SECONDS = 5
+DOCKER_CACHE_NUM_RETRIES = 3
+DOCKER_CACHE_TIMEOUT_MINS = 15
+PARALLEL_BUILDS = 10
+
def build_save_containers(platforms, registry, load_cache) -> int:
"""
@@ -49,7 +53,7 @@ def build_save_containers(platforms, registry, load_cache) -> int:
if len(platforms) == 0:
return 0
- platform_results = Parallel(n_jobs=len(platforms), backend="multiprocessing")(
+ platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")(
delayed(_build_save_container)(platform, registry, load_cache)
for platform in platforms)
@@ -107,6 +111,8 @@ def _upload_image(registry, docker_tag, image_id) -> None:
subprocess.check_call(push_cmd)
+@retry(target_exception=subprocess.CalledProcessError, tries=DOCKERHUB_LOGIN_NUM_RETRIES,
+ delay_s=DOCKERHUB_RETRY_SECONDS)
def _login_dockerhub():
"""
Login to the Docker Hub account
@@ -114,30 +120,19 @@ def _login_dockerhub():
"""
dockerhub_credentials = _get_dockerhub_credentials()
- for i in range(DOCKERHUB_LOGIN_NUM_RETRIES):
- logging.info('Logging in to DockerHub')
- # We use password-stdin instead of --password to avoid leaking passwords in case of an error.
- # This method will produce the following output:
- # > WARNING! Your password will be stored unencrypted in /home/jenkins_slave/.docker/config.json.
- # > Configure a credential helper to remove this warning. See
- # > https://docs.docker.com/engine/reference/commandline/login/#credentials-store
- # Since we consider the restricted slaves a secure environment, that's fine. Also, using this will require
- # third party applications which would need a review first as well.
- p = subprocess.run(['docker', 'login', '--username', dockerhub_credentials['username'], '--password-stdin'],
- stdout=subprocess.PIPE, input=str.encode(dockerhub_credentials['password']))
- logging.info(p.stdout)
- if p.returncode != 0:
- logging.error('Error logging in to DockerHub')
- logging.error(p.stderr)
-
- # Linear backoff
- time.sleep(1000 * DOCKERHUB_RETRY_SECONDS * (i + 1))
- else:
- logging.info('Successfully logged in to DockerHub')
- break
- else:
- logging.error('DockerHub login not possible after %d retries, aborting', DOCKERHUB_LOGIN_NUM_RETRIES)
- raise Exception('Unable to log in to DockerHub')
+ logging.info('Logging in to DockerHub')
+ # We use password-stdin instead of --password to avoid leaking passwords in case of an error.
+ # This method will produce the following output:
+ # > WARNING! Your password will be stored unencrypted in /home/jenkins_slave/.docker/config.json.
+ # > Configure a credential helper to remove this warning. See
+ # > https://docs.docker.com/engine/reference/commandline/login/#credentials-store
+ # Since we consider the restricted slaves a secure environment, that's fine. Also, using this will require
+ # third party applications which would need a review first as well.
+ p = subprocess.run(['docker', 'login', '--username', dockerhub_credentials['username'], '--password-stdin'],
+ stdout=subprocess.PIPE, input=str.encode(dockerhub_credentials['password']))
+ logging.info(p.stdout)
+ logging.info('Successfully logged in to DockerHub')
+
def _logout_dockerhub():
"""
@@ -149,6 +144,8 @@ def _logout_dockerhub():
logging.info('Successfully logged out of DockerHub')
+@retry(target_exception=subprocess.TimeoutExpired, tries=DOCKER_CACHE_NUM_RETRIES,
+ delay_s=DOCKERHUB_RETRY_SECONDS)
def load_docker_cache(registry, docker_tag) -> None:
"""
Load the precompiled docker cache from the registry
@@ -163,7 +160,10 @@ def load_docker_cache(registry, docker_tag) -> None:
logging.info('Loading Docker cache for %s from %s', docker_tag, registry)
pull_cmd = ['docker', 'pull', docker_tag]
- subprocess.call(pull_cmd) # Don't throw an error if the image does not exist
+
+ # Don't throw an error if the image does not exist
+ subprocess.run(pull_cmd, timeout=DOCKER_CACHE_TIMEOUT_MINS*60)
+ logging.info('Successfully pulled docker cache')
def delete_local_docker_cache(docker_tag):
@@ -211,8 +211,7 @@ def _get_dockerhub_credentials(): # pragma: no cover
logging.exception("The request was invalid due to:")
elif client_error.response['Error']['Code'] == 'InvalidParameterException':
logging.exception("The request had invalid params:")
- else:
- raise
+ raise
else:
secret = get_secret_value_response['SecretString']
secret_dict = json.loads(secret)
diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy
index d5cbd97683ed..33d76aa1668a 100644
--- a/ci/jenkins/Jenkins_steps.groovy
+++ b/ci/jenkins/Jenkins_steps.groovy
@@ -23,19 +23,17 @@
utils = load('ci/Jenkinsfile_utils.groovy')
// mxnet libraries
-mx_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libiomp5.so, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
+mx_lib = 'lib/libmxnet.so, lib/libmxnet.a, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
// Python wheels
mx_pip = 'build/*.whl'
-// for scala build, need to pass extra libs when run with dist_kvstore
-mx_dist_lib = 'lib/libmxnet.so, lib/libmxnet.a, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, lib/libmkldnn.a'
// mxnet cmake libraries, in cmake builds we do not produce a libnvvm static library by default.
mx_cmake_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
// mxnet cmake libraries, in cmake builds we do not produce a libnvvm static library by default.
mx_cmake_lib_debug = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests'
-mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
-mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libiomp5.so, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
+mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so, build/3rdparty/mkldnn/src/libmkldnn.so.0'
+mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libiomp5.so, lib/libmkldnn.so.0, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
mx_tensorrt_lib = 'lib/libmxnet.so, lib/libnvonnxparser_runtime.so.0, lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so'
mx_lib_cpp_examples = 'lib/libmxnet.so, lib/libmxnet.a, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*'
mx_lib_cpp_examples_cpu = 'build/libmxnet.so, build/cpp-package/example/*'
@@ -100,7 +98,7 @@ def compile_unix_cpu_openblas() {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_openblas', false)
- utils.pack_lib('cpu', mx_dist_lib, true)
+ utils.pack_lib('cpu', mx_lib, true)
}
}
}
@@ -108,7 +106,7 @@ def compile_unix_cpu_openblas() {
}
def compile_unix_openblas_debug_cpu() {
- return ['CPU: Openblas, debug': {
+ return ['CPU: Openblas, cmake, debug': {
node(NODE_LINUX_CPU) {
ws('workspace/build-cpu-openblas') {
timeout(time: max_time, unit: 'MINUTES') {
@@ -128,7 +126,7 @@ def compile_unix_mkl_cpu() {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_mkl', false)
- utils.pack_lib('cpu_mkl', mx_dist_lib, true)
+ utils.pack_lib('cpu_mkl', mx_mkldnn_lib, true)
}
}
}
@@ -254,7 +252,7 @@ def compile_centos7_cpu() {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('centos7_cpu', 'build_centos7_cpu', false)
- utils.pack_lib('centos7_cpu', mx_dist_lib, true)
+ utils.pack_lib('centos7_cpu', mx_lib, true)
}
}
}
@@ -268,7 +266,7 @@ def compile_centos7_cpu_mkldnn() {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('centos7_cpu', 'build_centos7_mkldnn', false)
- utils.pack_lib('centos7_mkldnn', mx_lib, true)
+ utils.pack_lib('centos7_mkldnn', mx_mkldnn_lib, true)
}
}
}
@@ -825,7 +823,21 @@ def test_unix_scala_cpu() {
node(NODE_LINUX_CPU) {
ws('workspace/ut-scala-cpu') {
timeout(time: max_time, unit: 'MINUTES') {
- utils.unpack_and_init('cpu', mx_dist_lib, true)
+ utils.unpack_and_init('cpu', mx_lib, true)
+ utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_scala', false)
+ utils.publish_test_coverage()
+ }
+ }
+ }
+ }]
+}
+
+def test_unix_scala_mkldnn_cpu(){
+ return ['Scala: MKLDNN-CPU': {
+ node(NODE_LINUX_CPU) {
+ ws('workspace/ut-scala-mkldnn-cpu') {
+ timeout(time: max_time, unit: 'MINUTES') {
+ utils.unpack_and_init('mkldnn_cpu', mx_mkldnn_lib, true)
utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_scala', false)
utils.publish_test_coverage()
}
@@ -839,7 +851,7 @@ def test_unix_scala_gpu() {
node(NODE_LINUX_GPU) {
ws('workspace/ut-scala-gpu') {
timeout(time: max_time, unit: 'MINUTES') {
- utils.unpack_and_init('gpu', mx_dist_lib, true)
+ utils.unpack_and_init('gpu', mx_lib, true)
utils.docker_run('ubuntu_gpu', 'integrationtest_ubuntu_gpu_scala', true)
utils.publish_test_coverage()
}
@@ -853,7 +865,7 @@ def test_unix_clojure_cpu() {
node(NODE_LINUX_CPU) {
ws('workspace/ut-clojure-cpu') {
timeout(time: max_time, unit: 'MINUTES') {
- utils.unpack_and_init('cpu', mx_dist_lib, true)
+ utils.unpack_and_init('cpu', mx_lib, true)
utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_clojure', false)
utils.publish_test_coverage()
}
@@ -882,7 +894,7 @@ def test_unix_cpp_gpu() {
ws('workspace/ut-cpp-gpu') {
timeout(time: max_time, unit: 'MINUTES') {
utils.unpack_and_init('cmake_gpu', mx_cmake_lib, true)
- utils.docker_run('ubuntu_gpu', 'unittest_ubuntu_gpu_cpp', true)
+ utils.docker_run('ubuntu_gpu', 'unittest_cpp', true)
utils.publish_test_coverage()
}
}
@@ -896,7 +908,21 @@ def test_unix_cpp_mkldnn_gpu() {
ws('workspace/ut-cpp-mkldnn-gpu') {
timeout(time: max_time, unit: 'MINUTES') {
utils.unpack_and_init('cmake_mkldnn_gpu', mx_cmake_mkldnn_lib, true)
- utils.docker_run('ubuntu_gpu', 'unittest_ubuntu_gpu_cpp', true)
+ utils.docker_run('ubuntu_gpu', 'unittest_cpp', true)
+ utils.publish_test_coverage()
+ }
+ }
+ }
+ }]
+}
+
+def test_unix_cpp_cpu() {
+ return ['Cpp: CPU': {
+ node(NODE_LINUX_CPU) {
+ ws('workspace/ut-cpp-cpu') {
+ timeout(time: max_time, unit: 'MINUTES') {
+ utils.unpack_and_init('cpu_debug', mx_cmake_lib_debug, true)
+ utils.docker_run('ubuntu_cpu', 'unittest_cpp', false)
utils.publish_test_coverage()
}
}
@@ -1015,7 +1041,7 @@ def test_centos7_scala_cpu() {
node(NODE_LINUX_CPU) {
ws('workspace/ut-scala-centos7-cpu') {
timeout(time: max_time, unit: 'MINUTES') {
- utils.unpack_and_init('centos7_cpu', mx_dist_lib, true)
+ utils.unpack_and_init('centos7_cpu', mx_lib, true)
utils.docker_run('centos7_cpu', 'unittest_centos7_cpu_scala', false)
utils.publish_test_coverage()
}
diff --git a/ci/jenkins/Jenkinsfile_centos_cpu b/ci/jenkins/Jenkinsfile_centos_cpu
index 3b66f8100173..a47ab3de7fb7 100644
--- a/ci/jenkins/Jenkinsfile_centos_cpu
+++ b/ci/jenkins/Jenkinsfile_centos_cpu
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_centos_gpu b/ci/jenkins/Jenkinsfile_centos_gpu
index aec3b9054f2f..cad77a9a7dd8 100644
--- a/ci/jenkins/Jenkinsfile_centos_gpu
+++ b/ci/jenkins/Jenkinsfile_centos_gpu
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_clang b/ci/jenkins/Jenkinsfile_clang
index 61920cf9865b..029c7208107b 100644
--- a/ci/jenkins/Jenkinsfile_clang
+++ b/ci/jenkins/Jenkinsfile_clang
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_edge b/ci/jenkins/Jenkinsfile_edge
index 275a0c96de94..9d8e01399d7c 100644
--- a/ci/jenkins/Jenkinsfile_edge
+++ b/ci/jenkins/Jenkinsfile_edge
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
@@ -34,7 +34,7 @@ utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_
utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
-// custom_steps.compile_armv8_jetson_gpu(),
+ custom_steps.compile_armv8_jetson_gpu(),
custom_steps.compile_armv7_cpu(),
custom_steps.compile_armv6_cpu(),
custom_steps.compile_armv8_cpu(),
diff --git a/ci/jenkins/Jenkinsfile_miscellaneous b/ci/jenkins/Jenkinsfile_miscellaneous
index c02cc991b864..dbf2a9e41c76 100644
--- a/ci/jenkins/Jenkinsfile_miscellaneous
+++ b/ci/jenkins/Jenkinsfile_miscellaneous
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
diff --git a/ci/jenkins/Jenkinsfile_sanity b/ci/jenkins/Jenkinsfile_sanity
index 123fedfdab79..ed4d16ec47db 100644
--- a/ci/jenkins/Jenkinsfile_sanity
+++ b/ci/jenkins/Jenkinsfile_sanity
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_unix_cpu b/ci/jenkins/Jenkinsfile_unix_cpu
index e581bcf65dc5..00b1aa9f68d9 100644
--- a/ci/jenkins/Jenkinsfile_unix_cpu
+++ b/ci/jenkins/Jenkinsfile_unix_cpu
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
@@ -50,11 +50,12 @@ core_logic: {
custom_steps.test_unix_python3_mkldnn_cpu(),
custom_steps.test_unix_python3_mkldnn_mkl_cpu(),
custom_steps.test_unix_scala_cpu(),
+ custom_steps.test_unix_scala_mkldnn_cpu(),
custom_steps.test_unix_clojure_cpu(),
custom_steps.test_unix_r_cpu(),
custom_steps.test_unix_julia_cpu(),
custom_steps.test_unix_onnx_cpu(),
-
+ custom_steps.test_unix_cpp_cpu(),
/* Disabled due to master build failure:
* http://jenkins.mxnet-ci.amazon-ml.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1221/pipeline/
* /~https://github.com/apache/incubator-mxnet/issues/11801
diff --git a/ci/jenkins/Jenkinsfile_unix_gpu b/ci/jenkins/Jenkinsfile_unix_gpu
index cf92836e96e4..bd884904d596 100644
--- a/ci/jenkins/Jenkinsfile_unix_gpu
+++ b/ci/jenkins/Jenkinsfile_unix_gpu
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_website b/ci/jenkins/Jenkinsfile_website
index 050f509e68e9..acdd2be4d00e 100644
--- a/ci/jenkins/Jenkinsfile_website
+++ b/ci/jenkins/Jenkinsfile_website
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_windows_cpu b/ci/jenkins/Jenkinsfile_windows_cpu
index 9e70df38dca5..a8746db73d34 100644
--- a/ci/jenkins/Jenkinsfile_windows_cpu
+++ b/ci/jenkins/Jenkinsfile_windows_cpu
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/jenkins/Jenkinsfile_windows_gpu b/ci/jenkins/Jenkinsfile_windows_gpu
index 69fd07343859..2319f25942de 100644
--- a/ci/jenkins/Jenkinsfile_windows_gpu
+++ b/ci/jenkins/Jenkinsfile_windows_gpu
@@ -21,7 +21,7 @@
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
// timeout in minutes
-max_time = 120
+max_time = 180
node('utility') {
// Loading the utilities requires a node context unfortunately
diff --git a/ci/publish/Jenkinsfile b/ci/publish/Jenkinsfile
new file mode 100644
index 000000000000..9a360c6b5bed
--- /dev/null
+++ b/ci/publish/Jenkinsfile
@@ -0,0 +1,107 @@
+// -*- mode: groovy -*-
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Jenkins pipeline
+// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
+
+//mxnet libraries
+mx_scala_pub = 'lib/libmxnet.so, lib/libmxnet.a, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, config.mk, scala-package/pom.xml, scala-package/**/pom.xml, scala-package/*/target/**, scala-package/local-snapshot/**'
+
+// timeout in minutes
+max_time = 120
+
+node('restricted-utility') {
+ // Loading the utilities requires a node context unfortunately
+ checkout scm
+ utils = load('ci/Jenkinsfile_utils.groovy')
+}
+utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-mxnetlinux-cpu', linux_gpu: 'restricted-mxnetlinux-gpu', linux_gpu_p3: 'restricted-mxnetlinux-gpu-p3', windows_cpu: 'restricted-mxnetwindows-cpu', windows_gpu: 'restricted-mxnetwindows-gpu')
+
+// CPU and GPU. OSX nodes are not currently supported by Jenkins
+def nodeMap = ['cpu': NODE_LINUX_CPU, 'gpu': NODE_LINUX_GPU]
+def scalaOSMap = ['cpu': 'linux-x86_64-cpu', 'gpu': 'linux-x86_64-gpu']
+
+def wrapStep(nodeToRun, workspaceName, step) {
+ return {
+ node(nodeToRun) {
+ ws("workspace/${workspaceName}") {
+ timeout(time: max_time, unit: 'MINUTES') {
+ step()
+ }
+ }
+ }
+ }
+}
+
+def toBuild = [:]
+def labels = ['cpu'] // , 'gpu']
+for (x in labels) {
+ def label = x // Required due to language
+ toBuild["Scala Build ${label}"] = wrapStep(nodeMap[label], "build-scala-${label}") {
+ withEnv(["MAVEN_PUBLISH_OS_TYPE=${scalaOSMap[label]}"]) {
+ utils.init_git()
+ utils.docker_run("ubuntu_${label}", 'publish_scala_build', label == 'gpu', '500m', 'MAVEN_PUBLISH_OS_TYPE')
+ utils.pack_lib("scala_${label}", mx_scala_pub, false)
+ }
+ }
+}
+
+def toTest = [:]
+def systems = ['ubuntu1604', 'ubuntu1804', 'centos7']
+for (x in labels) {
+ def label = x // Required due to language
+ for (y in systems) {
+ def system = y // Required due to language
+ toTest["Scala Test ${system} ${label}"] = wrapStep(nodeMap[label], "test-scala-${system}-${label}") {
+ utils.unpack_and_init("scala_${label}", mx_scala_pub, false)
+ utils.docker_run("publish.test.${system}_${label}", 'publish_scala_test', label == 'gpu')
+ }
+ }
+}
+
+def toDeploy = [:]
+for (x in labels) {
+ def label = x // Required due to language
+ toDeploy["Scala Deploy ${label}"] = wrapStep(nodeMap[label], "deploy-scala-${label}") {
+ withEnv(["MAVEN_PUBLISH_OS_TYPE=${scalaOSMap[label]}"]) {
+ utils.unpack_and_init("scala_${label}", mx_scala_pub, false)
+ utils.docker_run("ubuntu_${label}", 'publish_scala_deploy', label == 'gpu', '500m', 'MAVEN_PUBLISH_OS_TYPE MAVEN_PUBLISH_SECRET_ENDPOINT_URL MAVEN_PUBLISH_SECRET_NAME_CREDENTIALS MAVEN_PUBLISH_SECRET_NAME_GPG DOCKERHUB_SECRET_ENDPOINT_REGION')
+ }
+ }
+}
+
+utils.main_wrapper(
+core_logic: {
+ stage('Build Packages') {
+ parallel toBuild
+ }
+ stage('Test Packages') {
+ parallel toTest
+ }
+ stage('Deploy Packages') {
+ parallel toDeploy
+ }
+}
+,
+failure_handler: {
+ if (currentBuild.result == "FAILURE") {
+ // emailext body: 'Generating the nightly maven has failed. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[NIGHTLY MAVEN FAILED] Build ${BUILD_NUMBER}', to: '${EMAIL}'
+ }
+}
+)
diff --git a/ci/publish/scala/build.sh b/ci/publish/scala/build.sh
new file mode 100755
index 000000000000..17f969afe142
--- /dev/null
+++ b/ci/publish/scala/build.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# Setup Environment Variables
+# MAVEN_PUBLISH_OS_TYPE: linux-x86_64-cpu|linux-x86_64-gpu|osx-x86_64-cpu
+# export MAVEN_PUBLISH_OS_TYPE=linux-x86_64-cpu
+
+bash scala-package/dev/compile-mxnet-backend.sh $MAVEN_PUBLISH_OS_TYPE ./
+
+# Compile tests for discovery later
+cd scala-package
+mvn -B deploy
diff --git a/ci/publish/scala/buildkey.py b/ci/publish/scala/buildkey.py
new file mode 100644
index 000000000000..8a1b7bf63286
--- /dev/null
+++ b/ci/publish/scala/buildkey.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import json
+import logging
+import subprocess
+
+HOME = os.environ['HOME']
+KEY_PATH = os.path.join(HOME, ".m2")
+
+
+'''
+This file would do the following items:
+ Import keys from AWS Credential services
+ Create settings.xml in .m2 with pass phrase
+ Create security-settings.xml in .m2 with master password
+ Import keys.asc the encrypted keys in gpg
+'''
+
+
+def getCredentials():
+ import boto3
+ import botocore
+ endpoint_url = os.environ['MAVEN_PUBLISH_SECRET_ENDPOINT_URL']
+ secret_creds_name = os.environ['MAVEN_PUBLISH_SECRET_NAME_CREDENTIALS']
+ secret_key_name = os.environ['MAVEN_PUBLISH_SECRET_NAME_GPG']
+ region_name = os.environ['DOCKERHUB_SECRET_ENDPOINT_REGION']
+
+ session = boto3.Session()
+ client = session.client(
+ service_name='secretsmanager',
+ region_name=region_name,
+ endpoint_url=endpoint_url
+ )
+ try:
+ get_secret_value_response = client.get_secret_value(
+ SecretId=secret_creds_name
+ )
+ get_secret_key_response = client.get_secret_value(
+ SecretId=secret_key_name
+ )
+ except botocore.exceptions.ClientError as client_error:
+ if client_error.response['Error']['Code'] == 'ResourceNotFoundException':
+ name = (secret_key_name if get_secret_value_response
+ else secret_creds_name)
+ logging.exception("The requested secret %s was not found", name)
+ elif client_error.response['Error']['Code'] == 'InvalidRequestException':
+ logging.exception("The request was invalid due to:")
+ elif client_error.response['Error']['Code'] == 'InvalidParameterException':
+ logging.exception("The request had invalid params:")
+ raise
+ else:
+ secret = get_secret_value_response['SecretString']
+ secret_dict = json.loads(secret)
+ secret_key = get_secret_key_response['SecretString']
+ return secret_dict, secret_key
+
+
+def importASC(key, gpgPassphrase):
+ filename = os.path.join(KEY_PATH, "key.asc")
+ with open(filename, 'w') as f:
+ f.write(key)
+ subprocess.check_output(['gpg2', '--batch', '--yes',
+ '--passphrase-fd', '0',
+ "--import", "{}".format(filename)],
+ input=str.encode(gpgPassphrase))
+
+
+def encryptMasterPSW(password):
+ filename = os.path.join(KEY_PATH, "encryptMasterPassword.exp")
+ with open(filename, 'w') as f:
+ f.write('''
+ spawn mvn --encrypt-master-password
+ expect -exact "Master password: "
+ send -- "{}\r"
+ expect eof
+ '''.format(password))
+ result = subprocess.check_output(['expect', filename])
+ return str(result).split('\r\n')[-1][2:-3]
+
+
+def encryptPSW(password):
+ filename = os.path.join(KEY_PATH, "encryptPassword.exp")
+ with open(filename, 'w') as f:
+ f.write('''
+ spawn mvn --encrypt-password
+ expect -exact "Password: "
+ send -- "{}\r"
+ expect eof
+ '''.format(password))
+ result = subprocess.check_output(['expect', filename])
+ return str(result).split('\r\n')[-1][2:-3]
+
+
+def masterPSW(password):
+ with open(os.path.join(KEY_PATH, "settings-security.xml"), "w") as f:
+ f.write("<settingsSecurity>\n <master>{}</master>\n</settingsSecurity>"
+ .format(password))
+
+
+def serverPSW(username, password, gpgPassphrase):
+ with open(os.path.join(KEY_PATH, "settings.xml"), "w") as f:
+ settingsString = '''
+ <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+ https://maven.apache.org/xsd/settings-1.0.0.xsd">
+ <servers>
+ <server>
+ <id>apache.snapshots.https</id>
+ <username>{}</username>
+ <password>{}</password>
+ </server>
+ <server>
+ <id>apache.releases.https</id>
+ <username>{}</username>
+ <password>{}</password>
+ </server>
+ </servers>
+ <profiles>
+ <profile>
+ <id>gpg</id>
+ <properties>
+ <gpg.executable>gpg2</gpg.executable>
+ <gpg.passphrase>{}</gpg.passphrase>
+ <gpg.skip>true</gpg.skip>
+ </properties>
+ </profile>
+ </profiles>
+ <activeProfiles>
+ <activeProfile>gpg</activeProfile>
+ </activeProfiles>
+ </settings>
+ '''.format(username, password, username, password, gpgPassphrase)
+ f.write(settingsString)
+
+
+if __name__ == "__main__":
+ if not os.path.exists(KEY_PATH):
+ os.makedirs(KEY_PATH)
+ credentials, gpgKey = getCredentials()
+ masterPass = encryptMasterPSW(credentials['masterpass'])
+ masterPSW(masterPass)
+ passwordEncrypted = encryptPSW(credentials['password'])
+ serverPSW(credentials['user'], passwordEncrypted,
+ credentials['gpgPassphrase'])
+ importASC(gpgKey, credentials['gpgPassphrase'])
diff --git a/ci/publish/scala/deploy.sh b/ci/publish/scala/deploy.sh
new file mode 100755
index 000000000000..4eb33907eeb5
--- /dev/null
+++ b/ci/publish/scala/deploy.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# Setup Environment Variables
+# MAVEN_PUBLISH_OS_TYPE: linux-x86_64-cpu|linux-x86_64-gpu|osx-x86_64-cpu
+# export MAVEN_PUBLISH_OS_TYPE=linux-x86_64-cpu
+
+# Run python to configure keys
+python3 ci/publish/scala/buildkey.py
+
+# Updating cache
+mkdir -p ~/.gnupg
+echo "default-cache-ttl 14400" > ~/.gnupg/gpg-agent.conf
+echo "max-cache-ttl 14400" >> ~/.gnupg/gpg-agent.conf
+echo "allow-loopback-pinentry" >> ~/.gnupg/gpg-agent.conf
+echo "pinentry-mode loopback" >> ~/.gnupg/gpg-agent.conf
+export GPG_TTY=$(tty)
+
+cd scala-package
+
+mvn -B deploy -Pnightly
+
+# Clear all password .xml files, exp files, and gpg key files
+rm -rf ~/.m2/*.xml ~/.m2/key.asc ~/.m2/*.exp
diff --git a/ci/publish/scala/fullDeploy.sh b/ci/publish/scala/fullDeploy.sh
new file mode 100644
index 000000000000..69d674a97497
--- /dev/null
+++ b/ci/publish/scala/fullDeploy.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+./ci/publish/scala/build.sh
+./ci/publish/scala/test.sh
+./ci/publish/scala/deploy.sh
diff --git a/ci/publish/scala/test.sh b/ci/publish/scala/test.sh
new file mode 100755
index 000000000000..5cef35ca3c2b
--- /dev/null
+++ b/ci/publish/scala/test.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+if [ -z "$JAVA_HOME" ]; then
+ source /etc/profile
+fi
+
+# Test
+cd scala-package/packageTest
+# make testlocal CI=1
+make testsnapshot UNIT=1 CI=1
diff --git a/ci/test_docker_cache.py b/ci/test_docker_cache.py
index 358d54985aca..0a3bc4640c05 100644
--- a/ci/test_docker_cache.py
+++ b/ci/test_docker_cache.py
@@ -135,7 +135,7 @@ def test_full_cache(self):
"""
platform = 'test_full_cache'
docker_tag = build_util.get_docker_tag(platform=platform, registry=DOCKER_REGISTRY_PATH)
- dockerfile_path = os.path.join(DOCKERFILE_DIR, 'Dockerfile.build.' + platform)
+ dockerfile_path = os.path.join(DOCKERFILE_DIR, 'Dockerfile.' + platform)
try:
with open(dockerfile_path, 'w') as dockerfile_handle:
dockerfile_handle.write(dockerfile_content)
@@ -196,7 +196,7 @@ def test_partial_cache(self):
"""
platform = 'test_partial_cache'
docker_tag = build_util.get_docker_tag(platform=platform, registry=DOCKER_REGISTRY_PATH)
- dockerfile_path = os.path.join(DOCKERFILE_DIR, 'Dockerfile.build.' + platform)
+ dockerfile_path = os.path.join(DOCKERFILE_DIR, 'Dockerfile.' + platform)
try:
# Write initial Dockerfile
with open(dockerfile_path, 'w') as dockerfile_handle:
diff --git a/ci/util.py b/ci/util.py
index 4d68b57a3af4..9a8d52eb1716 100644
--- a/ci/util.py
+++ b/ci/util.py
@@ -18,7 +18,6 @@
import os
import contextlib
import logging
-import requests
def get_mxnet_root() -> str:
curpath = os.path.abspath(os.path.dirname(__file__))
@@ -89,6 +88,7 @@ def under_ci() -> bool:
def ec2_instance_id_hostname() -> str:
+ import requests
if under_ci():
result = []
try:
diff --git a/cmake/cmake_options.yml b/cmake/cmake_options.yml
index 01446f7b8f28..a4323feb92d4 100644
--- a/cmake/cmake_options.yml
+++ b/cmake/cmake_options.yml
@@ -16,7 +16,7 @@
# under the License.
--- # CMake configuration
-USE_CUDA: "ON" # Build with CUDA support
+USE_CUDA: "OFF" # Build with CUDA support
USE_OLDCMAKECUDA: "OFF" # Build with old cmake cuda
USE_NCCL: "OFF" # Use NVidia NCCL with CUDA
USE_OPENCV: "ON" # Build with OpenCV support
@@ -48,3 +48,6 @@ USE_TENSORRT: "OFF" # Enable infeference optimization with TensorRT.
USE_ASAN: "OFF" # Enable Clang/GCC ASAN sanitizers.
ENABLE_TESTCOVERAGE: "OFF" # Enable compilation with test coverage metric output
CMAKE_BUILD_TYPE: "Debug"
+CMAKE_CUDA_COMPILER_LAUNCHER: "ccache"
+CMAKE_C_COMPILER_LAUNCHER: "ccache"
+CMAKE_CXX_COMPILER_LAUNCHER: "ccache"
diff --git a/contrib/clojure-package/README.md b/contrib/clojure-package/README.md
index 152c9c635e6d..ba6160aed5c8 100644
--- a/contrib/clojure-package/README.md
+++ b/contrib/clojure-package/README.md
@@ -105,9 +105,12 @@ brew install opencv
- Create a new project with `lein new my-mxnet`
- Edit your `project.clj` and add one of the following entries to `:dependencies`, based on your system and the compute device you want to use:
- - `[org.apache.mxnet.contrib.clojure/clojure-mxnet-linux-cpu "1.3.1"]`
- - `[org.apache.mxnet.contrib.clojure/clojure-mxnet-linux-gpu "1.3.1"]`
- - `[org.apache.mxnet.contrib.clojure/clojure-mxnet-osx-cpu "1.3.1"]`
+
+ - `[org.apache.mxnet.contrib.clojure/clojure-mxnet-linux-cpu ]`
+ - `[org.apache.mxnet.contrib.clojure/clojure-mxnet-linux-gpu ]`
+ - `[org.apache.mxnet.contrib.clojure/clojure-mxnet-osx-cpu ]`
+
+You can find the latest version on [Maven Central - clojure-mxnet latest](https://search.maven.org/search?q=clojure-mxnet)
After making this change and running `lein deps`, you should be able to run example code like this [NDArray Tutorial](/~https://github.com/apache/incubator-mxnet/blob/master/contrib/clojure-package/examples/tutorial/src/tutorial/ndarray.clj).
@@ -116,38 +119,58 @@ After making this change and running `lein deps`, you should be able to run exam
With this option, you will install a Git revision of the Clojure package source and a [Scala package jar from Maven](https://search.maven.org/search?q=g:org.apache.mxnet) with native dependencies baked in.
- Install additional dependencies as described in [the corresponding section for Option 1](#installing-additional-dependencies),
-- Recursively clone the MXNet repository and checkout the desired revision. Here we assume the `1.3.1` tag and a clone into the `~/mxnet` directory:
+
+- Recursively clone the MXNet repository and check out the desired version (for example, 1.3.1). You should use the latest [version](https://search.maven.org/search?q=clojure-mxnet), and clone into the `~/mxnet` directory:
```bash
git clone --recursive /~https://github.com/apache/incubator-mxnet.git ~/mxnet
cd ~/mxnet
git tag --list # Find the tag that matches the Scala package version
- git checkout tags/1.3.1 -b my_mxnet
+
+ git checkout tags/version -b my_mxnet
git submodule update --init --recursive
cd contrib/clojure
```
- Edit `project.clj` to include the desired Scala jar from Maven:
- [org.apache.mxnet/mxnet-full_2.11-linux-x86_64-cpu "1.3.1”]
+
+ [org.apache.mxnet/mxnet-full_2.11-linux-x86_64-cpu ]
- Run `lein test`. All the tests should run without error.
- At this point you can run `lein install` to build and install the Clojure jar locally.
To run examples, you can now use `lein run` in any of the example directories, e.g., `examples/imclassification`. You can also specify the compute device, e.g., `lein run :cpu 2` (for 2 CPUs) or `lein run :gpu` (for 1 GPU).
-**Note:** Instead of a release tag, you can also use a development version of the Clojure package, e.g., Git `master`, together with the prebuilt Scala jar. In that case, however, breakage can happen at any point, for instance when the Scala development version adds, changes or removes an interface and the Clojure development version moves along. If you really need the most recent version, you should consider [installation option 3](#option-3-everything-from-source).
+#### Experimental: Using Scala Snapshot Jars
+**Note:** Instead of a release tag, you can also use a development version of the Clojure package, e.g., Git `master`, together with the prebuilt Scala jar. There is a repo of nightly built snapshots of Scala jars. You can use them in your `project.clj` by adding a repository:
+
+```
+["snapshots" {:url "https://repository.apache.org/content/repositories/snapshots"
+ :snapshots true
+ :sign-releases false
+ :checksum :fail
+ :update :always
+ :releases {:checksum :fail :update :always}}]
+```
+
+Then you should be able to run with your dependency:
+
+ [org.apache.mxnet/mxnet-full_2.11-osx-x86_64-cpu "latest-version-SNAPSHOT"]
+
+
+In that case, however, breakage can happen at any point, for instance when the Scala development version adds, changes or removes an interface and the Clojure development version moves along. If you really need the most recent version, you should consider [installation option 3](#option-3-everything-from-source).
### Option 3: Everything from Source
With this option, you will compile the core MXNet C++ package and jars for both Scala and Clojure language bindings from source. If you intend to make changes to the code in any of the parts, or if you simply want the latest and greatest features, this choice is for you.
-The first step is to recursively clone the MXNet repository and checkout the desired revision. Here we assume a clone into the `~/mxnet` directory:
+The first step is to recursively clone the MXNet repository and check out the desired version (for example, 1.3.1). You should use the latest [version](https://search.maven.org/search?q=clojure-mxnet), and clone into the `~/mxnet` directory:
```bash
git clone --recursive /~https://github.com/apache/incubator-mxnet.git ~/mxnet
cd ~/mxnet
- git checkout tags/1.3.1 -b my_mxnet # this is optional
+ git checkout tags/version -b my_mxnet # this is optional
git submodule update --init --recursive
```
@@ -170,13 +193,13 @@ The outcome of this step will be a shared library `lib/libmxnet.so` that is used
- Build and install the Scala package in your local Maven directory using the following commands:
```bash
- make scalapkg
- make scalainstall
+ cd scala-package
+ mvn install
```
#### Building the Clojure jar
-- Enter the `contrib/clojure` directory and edit the `project.clj` file. Add the Scala jar that was just created and installed, e.g., `[org.apache.mxnet/mxnet-full_2.11-osx-x86_64-cpu "1.5.0-SNAPSHOT"]`, to the `:dependencies`.
+- Enter the `contrib/clojure` directory and edit the `project.clj` file. Add the Scala jar that was just created and installed, e.g., `[org.apache.mxnet/mxnet-full_2.11-osx-x86_64-cpu "latest-version-SNAPSHOT"]`, to the `:dependencies`.
- Run `lein test`. All the tests should run without an error.
- Run `lein install` to build and install the Clojure jar locally.
diff --git a/contrib/clojure-package/examples/captcha/.gitignore b/contrib/clojure-package/examples/captcha/.gitignore
new file mode 100644
index 000000000000..e1569bd89020
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/.gitignore
@@ -0,0 +1,3 @@
+/.lein-*
+/.nrepl-port
+images/*
diff --git a/contrib/clojure-package/examples/captcha/README.md b/contrib/clojure-package/examples/captcha/README.md
new file mode 100644
index 000000000000..6b593b2f1c65
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/README.md
@@ -0,0 +1,61 @@
+# Captcha
+
+This is the clojure version of [captcha recognition](/~https://github.com/xlvector/learning-dl/tree/master/mxnet/ocr)
+example by xlvector and mirrors the R captcha example. It can be used as an
+example of multi-label training. For the following captcha example, we consider it as an
+image with 4 labels and train a CNN over the data set.
+
+![captcha example](captcha_example.png)
+
+## Installation
+
+Before you run this example, make sure that you have the clojure package
+installed. In the main clojure package directory, do `lein install`.
+Then you can run `lein install` in this directory.
+
+## Usage
+
+### Training
+
+First the OCR model needs to be trained based on [labeled data](https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/captcha_example.zip).
+The training can be started using the following:
+```
+$ lein train [:cpu|:gpu] [num-devices]
+```
+This downloads the training/evaluation data using the `get_data.sh` script
+before starting training.
+
+It is possible that you will encounter some out-of-memory issues while training with :gpu on Ubuntu
+Linux (18.04). If so, running `lein train` (training on one CPU) may avoid the issue.
+
+The training runs for 10 iterations by default and saves the model with the
+prefix `ocr-`. The model achieved an exact match accuracy of ~0.954 and
+~0.628 on training and validation data respectively.
+
+### Inference
+
+Once the model has been saved, it can be used for prediction. This can be done
+by running:
+```
+$ lein infer
+INFO MXNetJVM: Try loading mxnet-scala from native path.
+INFO MXNetJVM: Try loading mxnet-scala-linux-x86_64-gpu from native path.
+INFO MXNetJVM: Try loading mxnet-scala-linux-x86_64-cpu from native path.
+WARN MXNetJVM: MXNet Scala native library not found in path. Copying native library from the archive. Consider installing the library somewhere in the path (for Windows: PATH, for Linux: LD_LIBRARY_PATH), or specifying by Java cmd option -Djava.library.path=[lib path].
+WARN org.apache.mxnet.DataDesc: Found Undefined Layout, will use default index 0 for batch axis
+INFO org.apache.mxnet.infer.Predictor: Latency increased due to batchSize mismatch 8 vs 1
+WARN org.apache.mxnet.DataDesc: Found Undefined Layout, will use default index 0 for batch axis
+WARN org.apache.mxnet.DataDesc: Found Undefined Layout, will use default index 0 for batch axis
+CAPTCHA output: 6643
+INFO org.apache.mxnet.util.NativeLibraryLoader: Deleting /tmp/mxnet6045308279291774865/libmxnet.so
+INFO org.apache.mxnet.util.NativeLibraryLoader: Deleting /tmp/mxnet6045308279291774865/mxnet-scala
+INFO org.apache.mxnet.util.NativeLibraryLoader: Deleting /tmp/mxnet6045308279291774865
+```
+The model runs on `captcha_example.png` by default.
+
+It can be run on other generated captcha images as well. The script
+`gen_captcha.py` generates random captcha images for length 4.
+Before running the python script, you will need to install the [captcha](https://pypi.org/project/captcha/)
+library using `pip3 install --user captcha`. The captcha images are generated
+in the `images/` folder and we can run the prediction using
+`lein infer images/7534.png`.
diff --git a/contrib/clojure-package/examples/captcha/captcha_example.png b/contrib/clojure-package/examples/captcha/captcha_example.png
new file mode 100644
index 000000000000..09b84f7190fa
Binary files /dev/null and b/contrib/clojure-package/examples/captcha/captcha_example.png differ
diff --git a/contrib/clojure-package/examples/captcha/gen_captcha.py b/contrib/clojure-package/examples/captcha/gen_captcha.py
new file mode 100755
index 000000000000..43e0d26fb961
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/gen_captcha.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from captcha.image import ImageCaptcha
+import os
+import random
+
+length = 4
+width = 160
+height = 60
+IMAGE_DIR = "images"
+
+
+def random_text():
+ return ''.join(str(random.randint(0, 9))
+ for _ in range(length))
+
+
+if __name__ == '__main__':
+ image = ImageCaptcha(width=width, height=height)
+ captcha_text = random_text()
+ if not os.path.exists(IMAGE_DIR):
+ os.makedirs(IMAGE_DIR)
+ image.write(captcha_text, os.path.join(IMAGE_DIR, captcha_text + ".png"))
diff --git a/contrib/clojure-package/examples/captcha/get_data.sh b/contrib/clojure-package/examples/captcha/get_data.sh
new file mode 100755
index 000000000000..baa7f9eb818f
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/get_data.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -evx
+
+EXAMPLE_ROOT=$(cd "$(dirname $0)"; pwd)
+
+data_path=$EXAMPLE_ROOT
+
+if [ ! -f "$data_path/captcha_example.zip" ]; then
+ wget https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/captcha_example.zip -P $data_path
+fi
+
+if [ ! -f "$data_path/captcha_example/captcha_train.rec" ]; then
+ unzip $data_path/captcha_example.zip -d $data_path
+fi
diff --git a/contrib/clojure-package/examples/captcha/project.clj b/contrib/clojure-package/examples/captcha/project.clj
new file mode 100644
index 000000000000..fa37fecbe035
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/project.clj
@@ -0,0 +1,28 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(defproject captcha "0.1.0-SNAPSHOT"
+ :description "Captcha recognition via multi-label classification"
+ :plugins [[lein-cljfmt "0.5.7"]]
+ :dependencies [[org.clojure/clojure "1.9.0"]
+ [org.apache.mxnet.contrib.clojure/clojure-mxnet "1.5.0-SNAPSHOT"]]
+ :main ^:skip-aot captcha.train-ocr
+ :profiles {:train {:main captcha.train-ocr}
+ :infer {:main captcha.infer-ocr}
+ :uberjar {:aot :all}}
+ :aliases {"train" ["with-profile" "train" "run"]
+ "infer" ["with-profile" "infer" "run"]})
diff --git a/contrib/clojure-package/examples/captcha/src/captcha/consts.clj b/contrib/clojure-package/examples/captcha/src/captcha/consts.clj
new file mode 100644
index 000000000000..318e0d806873
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/src/captcha/consts.clj
@@ -0,0 +1,27 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns captcha.consts)
+
+(def batch-size 8)
+(def channels 3)
+(def height 30)
+(def width 80)
+(def data-shape [channels height width])
+(def num-labels 10)
+(def label-width 4)
+(def model-prefix "ocr")
diff --git a/contrib/clojure-package/examples/captcha/src/captcha/infer_ocr.clj b/contrib/clojure-package/examples/captcha/src/captcha/infer_ocr.clj
new file mode 100644
index 000000000000..f6a648e9867b
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/src/captcha/infer_ocr.clj
@@ -0,0 +1,56 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns captcha.infer-ocr
+ (:require [captcha.consts :refer :all]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [org.apache.clojure-mxnet.ndarray :as ndarray]))
+
+(defn create-predictor
+ []
+ (let [data-desc {:name "data"
+ :shape [batch-size channels height width]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}
+ label-desc {:name "label"
+ :shape [batch-size label-width]
+ :layout layout/NT
+ :dtype dtype/FLOAT32}
+ factory (infer/model-factory model-prefix
+ [data-desc label-desc])]
+ (infer/create-predictor factory)))
+
+(defn -main
+ [& args]
+ (let [[filename] args
+ image-fname (or filename "captcha_example.png")
+ image-ndarray (-> image-fname
+ infer/load-image-from-file
+ (infer/reshape-image width height)
+ (infer/buffered-image-to-pixels [channels height width])
+ (ndarray/expand-dims 0))
+ label-ndarray (ndarray/zeros [1 label-width])
+ predictor (create-predictor)
+ predictions (-> (infer/predict-with-ndarray
+ predictor
+ [image-ndarray label-ndarray])
+ first
+ (ndarray/argmax 1)
+ ndarray/->vec)]
+ (println "CAPTCHA output:" (apply str (mapv int predictions)))))
diff --git a/contrib/clojure-package/examples/captcha/src/captcha/train_ocr.clj b/contrib/clojure-package/examples/captcha/src/captcha/train_ocr.clj
new file mode 100644
index 000000000000..91ec2fff3af7
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/src/captcha/train_ocr.clj
@@ -0,0 +1,156 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns captcha.train-ocr
+ (:require [captcha.consts :refer :all]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [org.apache.clojure-mxnet.callback :as callback]
+ [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.eval-metric :as eval-metric]
+ [org.apache.clojure-mxnet.initializer :as initializer]
+ [org.apache.clojure-mxnet.io :as mx-io]
+ [org.apache.clojure-mxnet.module :as m]
+ [org.apache.clojure-mxnet.ndarray :as ndarray]
+ [org.apache.clojure-mxnet.optimizer :as optimizer]
+ [org.apache.clojure-mxnet.symbol :as sym])
+ (:gen-class))
+
+(when-not (.exists (io/file "captcha_example/captcha_train.lst"))
+ (sh "./get_data.sh"))
+
+(defonce train-data
+ (mx-io/image-record-iter {:path-imgrec "captcha_example/captcha_train.rec"
+ :path-imglist "captcha_example/captcha_train.lst"
+ :batch-size batch-size
+ :label-width label-width
+ :data-shape data-shape
+ :shuffle true
+ :seed 42}))
+
+(defonce eval-data
+ (mx-io/image-record-iter {:path-imgrec "captcha_example/captcha_test.rec"
+ :path-imglist "captcha_example/captcha_test.lst"
+ :batch-size batch-size
+ :label-width label-width
+ :data-shape data-shape}))
+
+(defn accuracy
+ [label pred & {:keys [by-character]
+ :or {by-character false} :as opts}]
+ (let [[nr nc] (ndarray/shape-vec label)
+ pred-context (ndarray/context pred)
+ label-t (-> label
+ ndarray/transpose
+ (ndarray/reshape [-1])
+ (ndarray/as-in-context pred-context))
+ pred-label (ndarray/argmax pred 1)
+ matches (ndarray/equal label-t pred-label)
+ [digit-matches] (-> matches
+ ndarray/sum
+ ndarray/->vec)
+ [complete-matches] (-> matches
+ (ndarray/reshape [nc nr])
+ (ndarray/sum 0)
+ (ndarray/equal label-width)
+ ndarray/sum
+ ndarray/->vec)]
+ (if by-character
+ (float (/ digit-matches nr nc))
+ (float (/ complete-matches nr)))))
+
+(defn get-data-symbol
+ []
+ (let [data (sym/variable "data")
+ ;; normalize the input pixels
+ scaled (sym/div (sym/- data 127) 128)
+
+ conv1 (sym/convolution {:data scaled :kernel [5 5] :num-filter 32})
+ pool1 (sym/pooling {:data conv1 :pool-type "max" :kernel [2 2] :stride [1 1]})
+ relu1 (sym/activation {:data pool1 :act-type "relu"})
+
+ conv2 (sym/convolution {:data relu1 :kernel [5 5] :num-filter 32})
+ pool2 (sym/pooling {:data conv2 :pool-type "avg" :kernel [2 2] :stride [1 1]})
+ relu2 (sym/activation {:data pool2 :act-type "relu"})
+
+ conv3 (sym/convolution {:data relu2 :kernel [3 3] :num-filter 32})
+ pool3 (sym/pooling {:data conv3 :pool-type "avg" :kernel [2 2] :stride [1 1]})
+ relu3 (sym/activation {:data pool3 :act-type "relu"})
+
+ conv4 (sym/convolution {:data relu3 :kernel [3 3] :num-filter 32})
+ pool4 (sym/pooling {:data conv4 :pool-type "avg" :kernel [2 2] :stride [1 1]})
+ relu4 (sym/activation {:data pool4 :act-type "relu"})
+
+ flattened (sym/flatten {:data relu4})
+ fc1 (sym/fully-connected {:data flattened :num-hidden 256})
+ fc21 (sym/fully-connected {:data fc1 :num-hidden num-labels})
+ fc22 (sym/fully-connected {:data fc1 :num-hidden num-labels})
+ fc23 (sym/fully-connected {:data fc1 :num-hidden num-labels})
+ fc24 (sym/fully-connected {:data fc1 :num-hidden num-labels})]
+ (sym/concat "concat" nil [fc21 fc22 fc23 fc24] {:dim 0})))
+
+(defn get-label-symbol
+ []
+ (as-> (sym/variable "label") label
+ (sym/transpose {:data label})
+ (sym/reshape {:data label :shape [-1]})))
+
+(defn create-captcha-net
+ []
+ (let [scores (get-data-symbol)
+ labels (get-label-symbol)]
+ (sym/softmax-output {:data scores :label labels})))
+
+(def optimizer
+ (optimizer/adam
+ {:learning-rate 0.0002
+ :wd 0.00001
+ :clip-gradient 10}))
+
+(defn train-ocr
+ [devs]
+ (println "Starting the captcha training ...")
+ (let [model (m/module
+ (create-captcha-net)
+ {:data-names ["data"] :label-names ["label"]
+ :contexts devs})]
+ (m/fit model {:train-data train-data
+ :eval-data eval-data
+ :num-epoch 10
+ :fit-params (m/fit-params
+ {:kvstore "local"
+ :batch-end-callback
+ (callback/speedometer batch-size 100)
+ :initializer
+ (initializer/xavier {:factor-type "in"
+ :magnitude 2.34})
+ :optimizer optimizer
+ :eval-metric (eval-metric/custom-metric
+ #(accuracy %1 %2)
+ "accuracy")})})
+ (println "Finished the fit")
+ model))
+
+(defn -main
+ [& args]
+ (let [[dev dev-num] args
+ num-devices (Integer/parseInt (or dev-num "1"))
+ devs (if (= dev ":gpu")
+ (mapv #(context/gpu %) (range num-devices))
+ (mapv #(context/cpu %) (range num-devices)))
+ model (train-ocr devs)]
+ (m/save-checkpoint model {:prefix model-prefix :epoch 0})))
diff --git a/contrib/clojure-package/examples/captcha/test/captcha/train_ocr_test.clj b/contrib/clojure-package/examples/captcha/test/captcha/train_ocr_test.clj
new file mode 100644
index 000000000000..ab785f7fedf2
--- /dev/null
+++ b/contrib/clojure-package/examples/captcha/test/captcha/train_ocr_test.clj
@@ -0,0 +1,119 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns captcha.train-ocr-test
+ (:require [clojure.test :refer :all]
+ [captcha.consts :refer :all]
+ [captcha.train-ocr :refer :all]
+ [org.apache.clojure-mxnet.io :as mx-io]
+ [org.apache.clojure-mxnet.module :as m]
+ [org.apache.clojure-mxnet.ndarray :as ndarray]
+ [org.apache.clojure-mxnet.shape :as shape]
+ [org.apache.clojure-mxnet.util :as util]))
+
+(deftest test-consts
+ (is (= 8 batch-size))
+ (is (= [3 30 80] data-shape))
+ (is (= 4 label-width))
+ (is (= 10 num-labels)))
+
+(deftest test-labeled-data
+ (let [train-batch (mx-io/next train-data)
+ eval-batch (mx-io/next eval-data)
+ allowed-labels (into #{} (map float (range 10)))]
+ (is (= 8 (-> train-batch mx-io/batch-index count)))
+ (is (= 8 (-> eval-batch mx-io/batch-index count)))
+ (is (= [8 3 30 80] (-> train-batch
+ mx-io/batch-data
+ first
+ ndarray/shape-vec)))
+ (is (= [8 3 30 80] (-> eval-batch
+ mx-io/batch-data
+ first
+ ndarray/shape-vec)))
+ (is (every? #(<= 0 % 255) (-> train-batch
+ mx-io/batch-data
+ first
+ ndarray/->vec)))
+ (is (every? #(<= 0 % 255) (-> eval-batch
+ mx-io/batch-data
+ first
+ ndarray/->vec)))
+ (is (= [8 4] (-> train-batch
+ mx-io/batch-label
+ first
+ ndarray/shape-vec)))
+ (is (= [8 4] (-> eval-batch
+ mx-io/batch-label
+ first
+ ndarray/shape-vec)))
+ (is (every? allowed-labels (-> train-batch
+ mx-io/batch-label
+ first
+ ndarray/->vec)))
+ (is (every? allowed-labels (-> eval-batch
+ mx-io/batch-label
+ first
+ ndarray/->vec)))))
+
+(deftest test-model
+ (let [batch (mx-io/next train-data)
+ model (m/module (create-captcha-net)
+ {:data-names ["data"] :label-names ["label"]})
+ _ (m/bind model
+ {:data-shapes (mx-io/provide-data-desc train-data)
+ :label-shapes (mx-io/provide-label-desc train-data)})
+ _ (m/init-params model)
+ _ (m/forward-backward model batch)
+ output-shapes (-> model
+ m/output-shapes
+ util/coerce-return-recursive)
+ outputs (-> model
+ m/outputs-merged
+ first)
+ grads (->> model m/grad-arrays (map first))]
+ (is (= [["softmaxoutput0_output" (shape/->shape [8 10])]]
+ output-shapes))
+ (is (= [32 10] (-> outputs ndarray/shape-vec)))
+ (is (every? #(<= 0.0 % 1.0) (-> outputs ndarray/->vec)))
+ (is (= [[32 3 5 5] [32] ; convolution1 weights+bias
+ [32 32 5 5] [32] ; convolution2 weights+bias
+ [32 32 3 3] [32] ; convolution3 weights+bias
+ [32 32 3 3] [32] ; convolution4 weights+bias
+ [256 28672] [256] ; fully-connected1 weights+bias
+ [10 256] [10] ; 1st label scores
+ [10 256] [10] ; 2nd label scores
+ [10 256] [10] ; 3rd label scores
+ [10 256] [10]] ; 4th label scores
+ (map ndarray/shape-vec grads)))))
+
+(deftest test-accuracy
+ (let [labels (ndarray/array [1 2 3 4,
+ 5 6 7 8]
+ [2 4])
+ pred-labels (ndarray/array [1 0,
+ 2 6,
+ 3 0,
+ 4 8]
+ [8])
+ preds (ndarray/one-hot pred-labels 10)]
+ (is (float? (accuracy labels preds)))
+ (is (float? (accuracy labels preds :by-character false)))
+ (is (float? (accuracy labels preds :by-character true)))
+ (is (= 0.5 (accuracy labels preds)))
+ (is (= 0.5 (accuracy labels preds :by-character false)))
+ (is (= 0.75 (accuracy labels preds :by-character true)))))
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/.gitignore b/contrib/clojure-package/examples/infer/imageclassifier/.gitignore
new file mode 100644
index 000000000000..35491f1a084a
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/.gitignore
@@ -0,0 +1,12 @@
+/target
+/classes
+/checkouts
+/images
+pom.xml
+pom.xml.asc
+*.jar
+*.class
+/.lein-*
+/.nrepl-port
+.hgignore
+.hg/
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/README.md b/contrib/clojure-package/examples/infer/imageclassifier/README.md
new file mode 100644
index 000000000000..a8328607c9a2
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/README.md
@@ -0,0 +1,24 @@
+# imageclassifier
+
+Run image classification using clojure infer package.
+
+## Installation
+
+Before you run this example, make sure that you have the clojure package installed.
+In the main clojure package directory, do `lein install`. Then you can run
+`lein install` in this directory.
+
+## Usage
+
+```
+$ chmod +x scripts/get_resnet_18_data.sh
+$ ./scripts/get_resnet_18_data.sh
+$
+$ lein run -- --help
+$ lein run -- -m models/resnet-18/resnet-18 -i images/kitten.jpg -d images/
+$
+$ lein uberjar
+$ java -jar target/imageclassifier-0.1.0-SNAPSHOT-standalone.jar --help
+$ java -jar target/imageclassifier-0.1.0-SNAPSHOT-standalone.jar \
+ -m models/resnet-18/resnet-18 -i images/kitten.jpg -d images/
+```
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/project.clj b/contrib/clojure-package/examples/infer/imageclassifier/project.clj
new file mode 100644
index 000000000000..2d5b171d9ab7
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/project.clj
@@ -0,0 +1,25 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(defproject imageclassifier "0.1.0-SNAPSHOT"
+ :description "Image classification using infer with MXNet"
+ :plugins [[lein-cljfmt "0.5.7"]]
+ :dependencies [[org.clojure/clojure "1.9.0"]
+ [org.clojure/tools.cli "0.4.1"]
+ [org.apache.mxnet.contrib.clojure/clojure-mxnet "1.5.0-SNAPSHOT"]]
+ :main ^:skip-aot infer.imageclassifier-example
+ :profiles {:uberjar {:aot :all}})
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/scripts/get_resnet_18_data.sh b/contrib/clojure-package/examples/infer/imageclassifier/scripts/get_resnet_18_data.sh
new file mode 100755
index 000000000000..1a142e8edbfd
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/scripts/get_resnet_18_data.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -evx
+
+MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd)
+
+data_path=$MXNET_ROOT/models/resnet-18/
+
+image_path=$MXNET_ROOT/images/
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+if [ ! -d "$image_path" ]; then
+ mkdir -p "$image_path"
+fi
+
+if [ ! -f "$data_path/resnet-18-0000.params" ]; then
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-symbol.json -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-0000.params -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/synset.txt -P $data_path
+fi
+
+if [ ! -f "$image_path/kitten.jpg" ]; then
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -P $image_path
+ wget https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg -P $image_path
+fi
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/scripts/get_resnet_data.sh b/contrib/clojure-package/examples/infer/imageclassifier/scripts/get_resnet_data.sh
new file mode 100755
index 000000000000..fcef59bacc6f
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/scripts/get_resnet_data.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+
+MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd)
+
+data_path=$MXNET_ROOT/models/resnet-152/
+
+image_path=$MXNET_ROOT/images/
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+if [ ! -d "$image_path" ]; then
+ mkdir -p "$image_path"
+fi
+
+if [ ! -f "$data_path/resnet-152-0000.params" ]; then
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-0000.params -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-symbol.json -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/synset.txt -P $data_path
+fi
+
+if [ ! -f "$image_path/kitten.jpg" ]; then
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -P $image_path
+fi
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/src/infer/imageclassifier_example.clj b/contrib/clojure-package/examples/infer/imageclassifier/src/infer/imageclassifier_example.clj
new file mode 100644
index 000000000000..4ec7ff7f1490
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/src/infer/imageclassifier_example.clj
@@ -0,0 +1,112 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns infer.imageclassifier-example
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.string :refer [join]]
+ [clojure.tools.cli :refer [parse-opts]])
+ (:gen-class))
+
+(defn check-valid-dir
+ "Check that the input directory exists"
+ [input-dir]
+ (let [dir (io/file input-dir)]
+ (and
+ (.exists dir)
+ (.isDirectory dir))))
+
+(defn check-valid-file
+ "Check that the file exists"
+ [input-file]
+ (.exists (io/file input-file)))
+
+(def cli-options
+ [["-m" "--model-path-prefix PREFIX" "Model path prefix"
+ :default "models/resnet-18/resnet-18"
+ :validate [#(check-valid-file (str % "-symbol.json"))
+ "Model path prefix is invalid"]]
+ ["-i" "--input-image IMAGE" "Input image"
+ :default "images/kitten.jpg"
+ :validate [check-valid-file "Input file not found"]]
+ ["-d" "--input-dir IMAGE_DIR" "Input directory"
+ :default "images/"
+ :validate [check-valid-dir "Input directory not found"]]
+ ["-h" "--help"]])
+
+(defn print-predictions
+ "Print image classifier predictions for the given input file"
+ [predictions]
+ (println (apply str (repeat 80 "=")))
+ (doseq [[label probability] predictions]
+ (println (format "Class: %s Probability=%.8f" label probability)))
+ (println (apply str (repeat 80 "="))))
+
+(defn classify-single-image
+ "Classify a single image and print top-5 predictions"
+ [classifier input-image]
+ (let [image (infer/load-image-from-file input-image)
+ topk 5
+ [predictions] (infer/classify-image classifier image topk)]
+ predictions))
+
+(defn classify-images-in-dir
+ "Classify all jpg images in the directory"
+ [classifier input-dir]
+ (let [batch-size 20
+ image-file-batches (->> input-dir
+ io/file
+ file-seq
+ (filter #(.isFile %))
+ (filter #(re-matches #".*\.jpg$" (.getPath %)))
+ (mapv #(.getPath %))
+ (partition-all batch-size))]
+ (apply
+ concat
+ (for [image-files image-file-batches]
+ (let [image-batch (infer/load-image-paths image-files)
+ topk 5]
+ (infer/classify-image-batch classifier image-batch topk))))))
+
+(defn run-classifier
+ "Runs an image classifier based on options provided"
+ [options]
+ (let [{:keys [model-path-prefix input-image input-dir]} options
+ descriptors [{:name "data"
+ :shape [1 3 224 224]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)
+ classifier (infer/create-image-classifier
+ factory {:contexts [(context/default-context)]})]
+ (println "Classifying a single image")
+ (print-predictions (classify-single-image classifier input-image))
+ (println "Classifying images in a directory")
+ (doseq [predictions (classify-images-in-dir classifier input-dir)]
+ (print-predictions predictions))))
+
+(defn -main
+ [& args]
+ (let [{:keys [options summary errors] :as opts}
+ (parse-opts args cli-options)]
+ (cond
+ (:help options) (println summary)
+ (some? errors) (println (join "\n" errors))
+ :else (run-classifier options))))
diff --git a/contrib/clojure-package/examples/infer/imageclassifier/test/infer/imageclassifier_example_test.clj b/contrib/clojure-package/examples/infer/imageclassifier/test/infer/imageclassifier_example_test.clj
new file mode 100644
index 000000000000..5b3e08d134f8
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/imageclassifier/test/infer/imageclassifier_example_test.clj
@@ -0,0 +1,69 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns infer.imageclassifier-example-test
+ (:require [infer.imageclassifier-example :refer [classify-single-image
+ classify-images-in-dir]]
+ [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [clojure.test :refer :all]))
+
+(def model-dir "models/")
+(def image-dir "images/")
+(def model-path-prefix (str model-dir "resnet-18/resnet-18"))
+(def image-file (str image-dir "kitten.jpg"))
+
+(when-not (.exists (io/file (str model-path-prefix "-symbol.json")))
+ (sh "./scripts/get_resnet_18_data.sh"))
+
+(defn create-classifier []
+ (let [descriptors [{:name "data"
+ :shape [1 3 224 224]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)]
+ (infer/create-image-classifier factory)))
+
+(deftest test-single-classification
+ (let [classifier (create-classifier)
+ predictions (classify-single-image classifier image-file)]
+ (is (some? predictions))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(float? (second %)) predictions))
+ (is (every? #(< 0 (second %) 1) predictions))
+ (is (= ["n02123159 tiger cat"
+ "n02124075 Egyptian cat"
+ "n02123045 tabby, tabby cat"
+ "n02127052 lynx, catamount"
+ "n02128757 snow leopard, ounce, Panthera uncia"]
+ (map first predictions)))))
+
+(deftest test-batch-classification
+ (let [classifier (create-classifier)
+ batch-predictions (classify-images-in-dir classifier image-dir)
+ predictions (first batch-predictions)]
+ (is (some? batch-predictions))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(float? (second %)) predictions))
+ (is (every? #(< 0 (second %) 1) predictions))))
diff --git a/contrib/clojure-package/examples/infer/objectdetector/.gitignore b/contrib/clojure-package/examples/infer/objectdetector/.gitignore
new file mode 100644
index 000000000000..35491f1a084a
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/objectdetector/.gitignore
@@ -0,0 +1,12 @@
+/target
+/classes
+/checkouts
+/images
+pom.xml
+pom.xml.asc
+*.jar
+*.class
+/.lein-*
+/.nrepl-port
+.hgignore
+.hg/
diff --git a/contrib/clojure-package/examples/infer/objectdetector/README.md b/contrib/clojure-package/examples/infer/objectdetector/README.md
new file mode 100644
index 000000000000..921c53e046d3
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/objectdetector/README.md
@@ -0,0 +1,24 @@
+# objectdetector
+
+Run object detection on images using clojure infer package.
+
+## Installation
+
+Before you run this example, make sure that you have the clojure package installed.
+In the main clojure package directory, do `lein install`. Then you can run
+`lein install` in this directory.
+
+## Usage
+
+```
+$ chmod +x scripts/get_ssd_data.sh
+$ ./scripts/get_ssd_data.sh
+$
+$ lein run -- --help
+$ lein run -- -m models/resnet50_ssd/resnet50_ssd_model -i images/dog.jpg -d images/
+$
+$ lein uberjar
+$ java -jar target/objectdetector-0.1.0-SNAPSHOT-standalone.jar --help
+$ java -jar target/objectdetector-0.1.0-SNAPSHOT-standalone.jar \
+ -m models/resnet50_ssd/resnet50_ssd_model -i images/dog.jpg -d images/
+```
diff --git a/contrib/clojure-package/examples/infer/objectdetector/project.clj b/contrib/clojure-package/examples/infer/objectdetector/project.clj
new file mode 100644
index 000000000000..4501f14a358e
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/objectdetector/project.clj
@@ -0,0 +1,25 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(defproject objectdetector "0.1.0-SNAPSHOT"
+ :description "Object detection using infer with MXNet"
+ :plugins [[lein-cljfmt "0.5.7"]]
+ :dependencies [[org.clojure/clojure "1.9.0"]
+ [org.clojure/tools.cli "0.4.1"]
+ [org.apache.mxnet.contrib.clojure/clojure-mxnet "1.5.0-SNAPSHOT"]]
+ :main ^:skip-aot infer.objectdetector-example
+ :profiles {:uberjar {:aot :all}})
diff --git a/contrib/clojure-package/examples/infer/objectdetector/scripts/get_ssd_data.sh b/contrib/clojure-package/examples/infer/objectdetector/scripts/get_ssd_data.sh
new file mode 100755
index 000000000000..06440a28452e
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/objectdetector/scripts/get_ssd_data.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+set -e
+
+MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd)
+
+data_path=$MXNET_ROOT/models/resnet50_ssd
+
+image_path=$MXNET_ROOT/images
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+if [ ! -d "$image_path" ]; then
+ mkdir -p "$image_path"
+fi
+
+if [ ! -f "$data_path/resnet50_ssd_model-0000.params" ]; then
+ wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-symbol.json -P $data_path
+ wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-0000.params -P $data_path
+ wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/synset.txt -P $data_path
+fi
+
+if [ ! -f "$image_path/000001.jpg" ]; then
+ cd $image_path
+ wget https://cloud.githubusercontent.com/assets/3307514/20012566/cbb53c76-a27d-11e6-9aaa-91939c9a1cd5.jpg -O 000001.jpg
+ wget https://cloud.githubusercontent.com/assets/3307514/20012567/cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg -O dog.jpg
+ wget https://cloud.githubusercontent.com/assets/3307514/20012563/cbb41382-a27d-11e6-92a9-18dab4fd1ad3.jpg -O person.jpg
+fi
+
diff --git a/contrib/clojure-package/examples/infer/objectdetector/src/infer/objectdetector_example.clj b/contrib/clojure-package/examples/infer/objectdetector/src/infer/objectdetector_example.clj
new file mode 100644
index 000000000000..53172f0c8cad
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/objectdetector/src/infer/objectdetector_example.clj
@@ -0,0 +1,121 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns infer.objectdetector-example
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.string :refer [join]]
+ [clojure.tools.cli :refer [parse-opts]])
+ (:gen-class))
+
+(defn check-valid-dir
+ "Check that the input directory exists"
+ [input-dir]
+ (let [dir (io/file input-dir)]
+ (and
+ (.exists dir)
+ (.isDirectory dir))))
+
+(defn check-valid-file
+ "Check that the file exists"
+ [input-file]
+ (.exists (io/file input-file)))
+
+(def cli-options
+ [["-m" "--model-path-prefix PREFIX" "Model path prefix"
+ :default "models/resnet50_ssd/resnet50_ssd_model"
+ :validate [#(check-valid-file (str % "-symbol.json"))
+ "Model path prefix is invalid"]]
+ ["-i" "--input-image IMAGE" "Input image"
+ :default "images/dog.jpg"
+ :validate [check-valid-file "Input file not found"]]
+ ["-d" "--input-dir IMAGE_DIR" "Input directory"
+ :default "images/"
+ :validate [check-valid-dir "Input directory not found"]]
+ ["-h" "--help"]])
+
+(defn print-predictions
+ "Print image detector predictions for the given input file"
+ [predictions width height]
+ (println (apply str (repeat 80 "=")))
+ (doseq [[label prob-and-bounds] predictions]
+ (println (format
+ "Class: %s Prob=%.5f Coords=(%.3f, %.3f, %.3f, %.3f)"
+ label
+ (aget prob-and-bounds 0)
+ (* (aget prob-and-bounds 1) width)
+ (* (aget prob-and-bounds 2) height)
+ (* (aget prob-and-bounds 3) width)
+ (* (aget prob-and-bounds 4) height))))
+ (println (apply str (repeat 80 "="))))
+
+(defn detect-single-image
+ "Detect objects in a single image and print top-5 predictions"
+ [detector input-image]
+ (let [image (infer/load-image-from-file input-image)
+ topk 5
+ [predictions] (infer/detect-objects detector image topk)]
+ predictions))
+
+(defn detect-images-in-dir
+ "Detect objects in all jpg images in the directory"
+ [detector input-dir]
+ (let [batch-size 20
+ image-file-batches (->> input-dir
+ io/file
+ file-seq
+ (filter #(.isFile %))
+ (filter #(re-matches #".*\.jpg$" (.getPath %)))
+ (mapv #(.getPath %))
+ (partition-all batch-size))]
+ (apply
+ concat
+ (for [image-files image-file-batches]
+ (let [image-batch (infer/load-image-paths image-files)
+ topk 5]
+ (infer/detect-objects-batch detector image-batch topk))))))
+
+(defn run-detector
+ "Runs an image detector based on options provided"
+ [options]
+ (let [{:keys [model-path-prefix input-image input-dir
+ device device-id]} options
+ width 512 height 512
+ descriptors [{:name "data"
+ :shape [1 3 height width]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)
+ detector (infer/create-object-detector
+ factory
+ {:contexts [(context/default-context)]})]
+ (println "Object detection on a single image")
+ (print-predictions (detect-single-image detector input-image) width height)
+ (println "Object detection on images in a directory")
+ (doseq [predictions (detect-images-in-dir detector input-dir)]
+ (print-predictions predictions width height))))
+
+(defn -main
+ [& args]
+ (let [{:keys [options summary errors] :as opts}
+ (parse-opts args cli-options)]
+ (cond
+ (:help options) (println summary)
+ (some? errors) (println (join "\n" errors))
+ :else (run-detector options))))
diff --git a/contrib/clojure-package/examples/infer/objectdetector/test/infer/objectdetector_example_test.clj b/contrib/clojure-package/examples/infer/objectdetector/test/infer/objectdetector_example_test.clj
new file mode 100644
index 000000000000..90ed02f67a73
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/objectdetector/test/infer/objectdetector_example_test.clj
@@ -0,0 +1,65 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns infer.objectdetector-example-test
+ (:require [infer.objectdetector-example :refer [detect-single-image
+ detect-images-in-dir]]
+ [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [clojure.test :refer :all]))
+
+(def model-dir "models/")
+(def image-dir "images/")
+(def model-path-prefix (str model-dir "resnet50_ssd/resnet50_ssd_model"))
+(def image-file (str image-dir "dog.jpg"))
+
+(when-not (.exists (io/file (str model-path-prefix "-symbol.json")))
+ (sh "./scripts/get_ssd_data.sh"))
+
+(defn create-detector []
+ (let [descriptors [{:name "data"
+ :shape [1 3 512 512]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)]
+ (infer/create-object-detector factory)))
+
+(deftest test-single-detection
+ (let [detector (create-detector)
+ predictions (detect-single-image detector image-file)]
+ (is (some? predictions))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(= 5 (count (second %))) predictions))
+ (is (every? #(< 0 (first (second %)) 1) predictions))
+ (is (= ["car" "bicycle" "dog" "bicycle" "person"]
+ (map first predictions)))))
+
+(deftest test-batch-detection
+ (let [detector (create-detector)
+ batch-predictions (detect-images-in-dir detector image-dir)
+ predictions (first batch-predictions)]
+ (is (some? batch-predictions))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(= 5 (count (second %))) predictions))
+ (is (every? #(< 0 (first (second %)) 1) predictions))))
diff --git a/contrib/clojure-package/examples/infer/predictor/.gitignore b/contrib/clojure-package/examples/infer/predictor/.gitignore
new file mode 100644
index 000000000000..35491f1a084a
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/.gitignore
@@ -0,0 +1,12 @@
+/target
+/classes
+/checkouts
+/images
+pom.xml
+pom.xml.asc
+*.jar
+*.class
+/.lein-*
+/.nrepl-port
+.hgignore
+.hg/
diff --git a/contrib/clojure-package/examples/infer/predictor/README.md b/contrib/clojure-package/examples/infer/predictor/README.md
new file mode 100644
index 000000000000..9ca71cf469a0
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/README.md
@@ -0,0 +1,24 @@
+# predictor
+
+Run model prediction using clojure infer package.
+
+## Installation
+
+Before you run this example, make sure that you have the clojure package installed.
+In the main clojure package directory, do `lein install`. Then you can run
+`lein install` in this directory.
+
+## Usage
+
+```
+$ chmod +x scripts/get_resnet_18_data.sh
+$ ./scripts/get_resnet_18_data.sh
+$
+$ lein run -- --help
+$ lein run -- -m models/resnet-18/resnet-18 -i images/kitten.jpg
+$
+$ lein uberjar
+$ java -jar target/predictor-0.1.0-SNAPSHOT-standalone.jar --help
+$ java -jar target/predictor-0.1.0-SNAPSHOT-standalone.jar \
+ -m models/resnet-18/resnet-18 -i images/kitten.jpg
+```
diff --git a/contrib/clojure-package/examples/infer/predictor/project.clj b/contrib/clojure-package/examples/infer/predictor/project.clj
new file mode 100644
index 000000000000..0bd1eaee671d
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/project.clj
@@ -0,0 +1,25 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(defproject predictor "0.1.0-SNAPSHOT"
+ :description "Model prediction using infer with MXNet"
+ :plugins [[lein-cljfmt "0.5.7"]]
+ :dependencies [[org.clojure/clojure "1.9.0"]
+ [org.clojure/tools.cli "0.4.1"]
+ [org.apache.mxnet.contrib.clojure/clojure-mxnet "1.5.0-SNAPSHOT"]]
+ :main ^:skip-aot infer.predictor-example
+ :profiles {:uberjar {:aot :all}})
diff --git a/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_18_data.sh b/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_18_data.sh
new file mode 100755
index 000000000000..cf85355fae2d
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_18_data.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -evx
+
+MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd)
+
+data_path=$MXNET_ROOT/models/resnet-18/
+
+image_path=$MXNET_ROOT/images/
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+if [ ! -d "$image_path" ]; then
+ mkdir -p "$image_path"
+fi
+
+if [ ! -f "$data_path/resnet-18-0000.params" ]; then
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-symbol.json -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-0000.params -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/synset.txt -P $data_path
+fi
+
+if [ ! -f "$image_path/kitten.jpg" ]; then
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -P $image_path
+fi
diff --git a/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_data.sh b/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_data.sh
new file mode 100755
index 000000000000..fcef59bacc6f
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_data.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -e
+
+MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd)
+
+data_path=$MXNET_ROOT/models/resnet-152/
+
+image_path=$MXNET_ROOT/images/
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+if [ ! -d "$image_path" ]; then
+ mkdir -p "$image_path"
+fi
+
+if [ ! -f "$data_path/resnet-152-0000.params" ]; then
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-0000.params -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-symbol.json -P $data_path
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/synset.txt -P $data_path
+fi
+
+if [ ! -f "$image_path/kitten.jpg" ]; then
+ wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -P $image_path
+fi
diff --git a/contrib/clojure-package/examples/infer/predictor/src/infer/predictor_example.clj b/contrib/clojure-package/examples/infer/predictor/src/infer/predictor_example.clj
new file mode 100644
index 000000000000..498964128dd8
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/src/infer/predictor_example.clj
@@ -0,0 +1,101 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns infer.predictor-example
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.image :as image]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [org.apache.clojure-mxnet.ndarray :as ndarray]
+ [clojure.java.io :as io]
+ [clojure.string :refer [join split]]
+ [clojure.tools.cli :refer [parse-opts]])
+ (:gen-class))
+
+(defn check-valid-file
+ "Check that the file exists"
+ [input-file]
+ (.exists (io/file input-file)))
+
+(def cli-options
+ [["-m" "--model-path-prefix PREFIX" "Model path prefix"
+ :default "models/resnet-18/resnet-18"
+ :validate [#(check-valid-file (str % "-symbol.json"))
+ "Model path prefix is invalid"]]
+ ["-i" "--input-image IMAGE" "Image path"
+ :default "images/kitten.jpg"
+ :validate [check-valid-file "Input image path not found"]]
+ ["-h" "--help"]])
+
+(defn print-prediction
+ [prediction]
+ (println (apply str (repeat 80 "=")))
+ (println prediction)
+ (println (apply str (repeat 80 "="))))
+
+(defn preprocess
+ "Preprocesses image to make it ready for prediction"
+ [image-path width height]
+ (-> image-path
+ infer/load-image-from-file
+ (infer/reshape-image width height)
+ (infer/buffered-image-to-pixels [3 width height])
+ (ndarray/expand-dims 0)))
+
+(defn do-inference
+ "Run inference using given predictor"
+ [predictor image]
+ (let [[predictions] (infer/predict-with-ndarray predictor [image])]
+ predictions))
+
+(defn postprocess
+ [model-path-prefix predictions]
+ (let [synset-file (-> model-path-prefix
+ io/file
+ (.getParent)
+ (io/file "synset.txt"))
+ synset-names (split (slurp synset-file) #"\n")
+ [max-idx] (ndarray/->int-vec (ndarray/argmax predictions 1))]
+ (synset-names max-idx)))
+
+(defn run-predictor
+ "Runs an image classifier based on options provided"
+ [options]
+ (let [{:keys [model-path-prefix input-image]} options
+ width 224
+ height 224
+ descriptors [{:name "data"
+ :shape [1 3 height width]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)
+ predictor (infer/create-predictor
+ factory
+ {:contexts [(context/default-context)]})
+ image-ndarray (preprocess input-image width height)
+ predictions (do-inference predictor image-ndarray)
+ best-prediction (postprocess model-path-prefix predictions)]
+ (print-prediction best-prediction)))
+
+(defn -main
+ [& args]
+ (let [{:keys [options summary errors] :as opts}
+ (parse-opts args cli-options)]
+ (cond
+ (:help options) (println summary)
+ (some? errors) (println (join "\n" errors))
+ :else (run-predictor options))))
diff --git a/contrib/clojure-package/examples/infer/predictor/test/infer/predictor_example_test.clj b/contrib/clojure-package/examples/infer/predictor/test/infer/predictor_example_test.clj
new file mode 100644
index 000000000000..02f826fbb77f
--- /dev/null
+++ b/contrib/clojure-package/examples/infer/predictor/test/infer/predictor_example_test.clj
@@ -0,0 +1,51 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns infer.predictor-example-test
+ (:require [infer.predictor-example :refer [preprocess
+ do-inference
+ postprocess]]
+ [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [clojure.test :refer :all]))
+
+(def model-dir "models/")
+(def image-file "images/kitten.jpg")
+(def model-path-prefix (str model-dir "resnet-18/resnet-18"))
+(def width 224)
+(def height 224)
+
+(when-not (.exists (io/file (str model-path-prefix "-symbol.json")))
+ (sh "./scripts/get_resnet_18_data.sh"))
+
+(defn create-predictor []
+ (let [descriptors [{:name "data"
+ :shape [1 3 height width]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)]
+ (infer/create-predictor factory)))
+
+(deftest predictor-test
+ (let [predictor (create-predictor)
+ image-ndarray (preprocess image-file width height)
+ predictions (do-inference predictor image-ndarray)
+ best-prediction (postprocess model-path-prefix predictions)]
+ (is (= "n02123159 tiger cat" best-prediction))))
diff --git a/contrib/clojure-package/integration-tests.sh b/contrib/clojure-package/integration-tests.sh
index 3297fdc2c329..6e5868712026 100755
--- a/contrib/clojure-package/integration-tests.sh
+++ b/contrib/clojure-package/integration-tests.sh
@@ -18,11 +18,11 @@
set -evx
-MXNET_HOME=${PWD}
+MXNET_HOME=$(cd "$(dirname $0)/../.."; pwd)
EXAMPLES_HOME=${MXNET_HOME}/contrib/clojure-package/examples
#cd ${MXNET_HOME}/contrib/clojure-package
#lein test
#lein cloverage --codecov
-for i in `find ${EXAMPLES_HOME} -name test` ; do
-cd ${i} && lein test
+for test_dir in `find ${EXAMPLES_HOME} -name test` ; do
+ cd ${test_dir} && lein test
done
diff --git a/contrib/clojure-package/project.clj b/contrib/clojure-package/project.clj
index 12a0504e02d5..c4428ce6eff4 100644
--- a/contrib/clojure-package/project.clj
+++ b/contrib/clojure-package/project.clj
@@ -29,7 +29,7 @@
;[org.apache.mxnet/mxnet-full_2.11-linux-x86_64-gpu "1.2.1"]
;;; CI
- [org.apache.mxnet/mxnet-full_2.11-linux-x86_64-cpu "1.5.0-SNAPSHOT"]
+ [org.apache.mxnet/mxnet-full_2.11 "INTERNAL"]
[org.clojure/tools.logging "0.4.0"]
[org.apache.logging.log4j/log4j-core "2.8.1"]
diff --git a/contrib/clojure-package/scripts/infer/get_resnet_18_data.sh b/contrib/clojure-package/scripts/infer/get_resnet_18_data.sh
new file mode 100755
index 000000000000..601f362c4159
--- /dev/null
+++ b/contrib/clojure-package/scripts/infer/get_resnet_18_data.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+set -evx
+
+if [ ! -z "$MXNET_HOME" ]; then
+ data_path="$MXNET_HOME/data"
+else
+ MXNET_ROOT=$(cd "$(dirname $0)/../.."; pwd)
+ data_path="$MXNET_ROOT/data"
+fi
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+resnet_18_data_path="$data_path/resnet-18"
+if [ ! -f "$resnet_18_data_path/resnet-18-0000.params" ]; then
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-symbol.json -P $resnet_18_data_path
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-0000.params -P $resnet_18_data_path
+ wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/synset.txt -P $resnet_18_data_path
+fi
diff --git a/contrib/clojure-package/scripts/infer/get_ssd_data.sh b/contrib/clojure-package/scripts/infer/get_ssd_data.sh
new file mode 100755
index 000000000000..96e27a12d280
--- /dev/null
+++ b/contrib/clojure-package/scripts/infer/get_ssd_data.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+set -evx
+
+if [ ! -z "$MXNET_HOME" ]; then
+ data_path="$MXNET_HOME/data"
+else
+ MXNET_ROOT=$(cd "$(dirname $0)/../.."; pwd)
+ data_path="$MXNET_ROOT/data"
+fi
+
+if [ ! -d "$data_path" ]; then
+ mkdir -p "$data_path"
+fi
+
+resnet50_ssd_data_path="$data_path/resnet50_ssd"
+if [ ! -f "$resnet50_ssd_data_path/resnet50_ssd_model-0000.params" ]; then
+ wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-symbol.json -P $resnet50_ssd_data_path
+ wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-0000.params -P $resnet50_ssd_data_path
+ wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/synset.txt -P $resnet50_ssd_data_path
+fi
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj
index 6e726eba9da6..e2e87ed47e2f 100644
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj
+++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj
@@ -62,8 +62,8 @@
(util/validate! ::optional-color-flag color-flag "Invalid color flag")
(util/validate! ::optional-to-rgb to-rgb "Invalid conversion flag")
(util/validate! ::output output "Invalid output")
- (Image/imRead
- filename
+ (Image/imRead
+ filename
($/option color-flag)
($/option to-rgb)
($/option output)))
@@ -89,7 +89,7 @@
(defn apply-border
"Pad image border"
- ([input top bottom left right
+ ([input top bottom left right
{:keys [fill-type value values output]
:or {fill-type nil value nil values nil output nil}
:as opts}]
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/infer.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/infer.clj
new file mode 100644
index 000000000000..224a39275dac
--- /dev/null
+++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/infer.clj
@@ -0,0 +1,345 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns org.apache.clojure-mxnet.infer
+ (:refer-clojure :exclude [type])
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.io :as mx-io]
+ [org.apache.clojure-mxnet.shape :as shape]
+ [org.apache.clojure-mxnet.util :as util]
+ [clojure.spec.alpha :as s])
+ (:import (java.awt.image BufferedImage)
+ (org.apache.mxnet NDArray)
+ (org.apache.mxnet.infer Classifier ImageClassifier
+ ObjectDetector Predictor)))
+
+(s/def ::predictor #(instance? Predictor %))
+(s/def ::classifier #(instance? Classifier %))
+(s/def ::image-classifier #(instance? ImageClassifier %))
+(s/def ::object-detector #(instance? ObjectDetector %))
+
+(defrecord WrappedPredictor [predictor])
+(defrecord WrappedClassifier [classifier])
+(defrecord WrappedImageClassifier [image-classifier])
+(defrecord WrappedObjectDetector [object-detector])
+
+(s/def ::ndarray #(instance? NDArray %))
+(s/def ::float-array (s/and #(.isArray (class %)) #(every? float? %)))
+(s/def ::vec-of-float-arrays (s/coll-of ::float-array :kind vector?))
+(s/def ::vec-of-ndarrays (s/coll-of ::ndarray :kind vector?))
+
+(s/def ::wrapped-predictor (s/keys :req-un [::predictor]))
+(s/def ::wrapped-classifier (s/keys :req-un [::classifier]))
+(s/def ::wrapped-image-classifier (s/keys :req-un [::image-classifier]))
+(s/def ::wrapped-detector (s/keys :req-un [::object-detector]))
+
+(defprotocol APredictor
+ (predict [wrapped-predictor inputs])
+ (predict-with-ndarray [wrapped-predictor input-arrays]))
+
+(defprotocol AClassifier
+ (classify
+ [wrapped-classifier inputs]
+ [wrapped-classifier inputs topk])
+ (classify-with-ndarray
+ [wrapped-classifier inputs]
+ [wrapped-classifier inputs topk]))
+
+(defprotocol AImageClassifier
+ (classify-image
+ [wrapped-image-classifier image]
+ [wrapped-image-classifier image topk]
+ [wrapped-image-classifier image topk dtype])
+ (classify-image-batch
+ [wrapped-image-classifier images]
+ [wrapped-image-classifier images topk]
+ [wrapped-image-classifier images topk dtype]))
+
+(defprotocol AObjectDetector
+ (detect-objects
+ [wrapped-detector image]
+ [wrapped-detector image topk])
+ (detect-objects-batch
+ [wrapped-detector images]
+ [wrapped-detector images topk])
+ (detect-objects-with-ndarrays
+ [wrapped-detector input-arrays]
+ [wrapped-detector input-arrays topk]))
+
+(extend-protocol APredictor
+ WrappedPredictor
+ (predict
+ [wrapped-predictor inputs]
+ (util/validate! ::wrapped-predictor wrapped-predictor
+ "Invalid predictor")
+ (util/validate! ::vec-of-float-arrays inputs
+ "Invalid inputs")
+ (util/coerce-return-recursive
+ (.predict (:predictor wrapped-predictor)
+ (util/vec->indexed-seq inputs))))
+ (predict-with-ndarray [wrapped-predictor input-arrays]
+ (util/validate! ::wrapped-predictor wrapped-predictor
+ "Invalid predictor")
+ (util/validate! ::vec-of-ndarrays input-arrays
+ "Invalid input arrays")
+ (util/coerce-return-recursive
+ (.predictWithNDArray (:predictor wrapped-predictor)
+ (util/vec->indexed-seq input-arrays)))))
+
+(s/def ::nil-or-int (s/nilable int?))
+
+(extend-protocol AClassifier
+ WrappedClassifier
+ (classify
+ ([wrapped-classifier inputs]
+ (classify wrapped-classifier inputs nil))
+ ([wrapped-classifier inputs topk]
+ (util/validate! ::wrapped-classifier wrapped-classifier
+ "Invalid classifier")
+ (util/validate! ::vec-of-float-arrays inputs
+ "Invalid inputs")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.classify (:classifier wrapped-classifier)
+ (util/vec->indexed-seq inputs)
+ (util/->int-option topk)))))
+ (classify-with-ndarray
+ ([wrapped-classifier inputs]
+ (classify-with-ndarray wrapped-classifier inputs nil))
+ ([wrapped-classifier inputs topk]
+ (util/validate! ::wrapped-classifier wrapped-classifier
+ "Invalid classifier")
+ (util/validate! ::vec-of-ndarrays inputs
+ "Invalid inputs")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.classifyWithNDArray (:classifier wrapped-classifier)
+ (util/vec->indexed-seq inputs)
+ (util/->int-option topk)))))
+ WrappedImageClassifier
+ (classify
+ ([wrapped-image-classifier inputs]
+ (classify wrapped-image-classifier inputs nil))
+ ([wrapped-image-classifier inputs topk]
+ (util/validate! ::wrapped-image-classifier wrapped-image-classifier
+ "Invalid classifier")
+ (util/validate! ::vec-of-float-arrays inputs
+ "Invalid inputs")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.classify (:image-classifier wrapped-image-classifier)
+ (util/vec->indexed-seq inputs)
+ (util/->int-option topk)))))
+ (classify-with-ndarray
+ ([wrapped-image-classifier inputs]
+ (classify-with-ndarray wrapped-image-classifier inputs nil))
+ ([wrapped-image-classifier inputs topk]
+ (util/validate! ::wrapped-image-classifier wrapped-image-classifier
+ "Invalid classifier")
+ (util/validate! ::vec-of-ndarrays inputs
+ "Invalid inputs")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.classifyWithNDArray (:image-classifier wrapped-image-classifier)
+ (util/vec->indexed-seq inputs)
+ (util/->int-option topk))))))
+
+(s/def ::image #(instance? BufferedImage %))
+(s/def ::dtype #{dtype/UINT8 dtype/INT32 dtype/FLOAT16 dtype/FLOAT32 dtype/FLOAT64})
+
+(extend-protocol AImageClassifier
+ WrappedImageClassifier
+ (classify-image
+ ([wrapped-image-classifier image]
+ (classify-image wrapped-image-classifier image nil dtype/FLOAT32))
+ ([wrapped-image-classifier image topk]
+ (classify-image wrapped-image-classifier image topk dtype/FLOAT32))
+ ([wrapped-image-classifier image topk dtype]
+ (util/validate! ::wrapped-image-classifier wrapped-image-classifier
+ "Invalid classifier")
+ (util/validate! ::image image "Invalid image")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/validate! ::dtype dtype "Invalid dtype")
+ (util/coerce-return-recursive
+ (.classifyImage (:image-classifier wrapped-image-classifier)
+ image
+ (util/->int-option topk)
+ dtype))))
+ (classify-image-batch
+ ([wrapped-image-classifier images]
+ (classify-image-batch wrapped-image-classifier images nil dtype/FLOAT32))
+ ([wrapped-image-classifier images topk]
+ (classify-image-batch wrapped-image-classifier images topk dtype/FLOAT32))
+ ([wrapped-image-classifier images topk dtype]
+ (util/validate! ::wrapped-image-classifier wrapped-image-classifier
+ "Invalid classifier")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/validate! ::dtype dtype "Invalid dtype")
+ (util/coerce-return-recursive
+ (.classifyImageBatch (:image-classifier wrapped-image-classifier)
+ images
+ (util/->int-option topk)
+ dtype)))))
+
+(extend-protocol AObjectDetector
+ WrappedObjectDetector
+ (detect-objects
+ ([wrapped-detector image]
+ (detect-objects wrapped-detector image nil))
+ ([wrapped-detector image topk]
+ (util/validate! ::wrapped-detector wrapped-detector
+ "Invalid object detector")
+ (util/validate! ::image image "Invalid image")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.imageObjectDetect (:object-detector wrapped-detector)
+ image
+ (util/->int-option topk)))))
+ (detect-objects-batch
+ ([wrapped-detector images]
+ (detect-objects-batch wrapped-detector images nil))
+ ([wrapped-detector images topk]
+ (util/validate! ::wrapped-detector wrapped-detector
+ "Invalid object detector")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.imageBatchObjectDetect (:object-detector wrapped-detector)
+ images
+ (util/->int-option topk)))))
+ (detect-objects-with-ndarrays
+ ([wrapped-detector input-arrays]
+ (detect-objects-with-ndarrays wrapped-detector input-arrays nil))
+ ([wrapped-detector input-arrays topk]
+ (util/validate! ::wrapped-detector wrapped-detector
+ "Invalid object detector")
+ (util/validate! ::vec-of-ndarrays input-arrays
+ "Invalid inputs")
+ (util/validate! ::nil-or-int topk "Invalid top-K")
+ (util/coerce-return-recursive
+ (.objectDetectWithNDArray (:object-detector wrapped-detector)
+ (util/vec->indexed-seq input-arrays)
+ (util/->int-option topk))))))
+
+(defprotocol AInferenceFactory
+ (create-predictor [factory] [factory opts])
+ (create-classifier [factory] [factory opts])
+ (create-image-classifier [factory] [factory opts])
+ (create-object-detector [factory] [factory opts]))
+
+(defn convert-descriptors
+ [descriptors]
+ (util/vec->indexed-seq
+ (into [] (map mx-io/data-desc descriptors))))
+
+(defrecord InferenceFactory [model-path-prefix input-descriptors]
+ AInferenceFactory
+ (create-predictor
+ [factory]
+ (create-predictor factory {}))
+ (create-predictor
+ [factory opts]
+ (let [{:keys [contexts epoch]
+ :or {contexts [(context/cpu)] epoch 0}} opts]
+ (->WrappedPredictor
+ (new Predictor
+ model-path-prefix
+ (convert-descriptors input-descriptors)
+ (into-array contexts)
+ (util/->int-option epoch)))))
+ (create-classifier
+ [factory]
+ (create-classifier factory {}))
+ (create-classifier
+ [factory opts]
+ (let [{:keys [contexts epoch]
+ :or {contexts [(context/cpu)] epoch 0}} opts]
+ (->WrappedClassifier
+ (new Classifier
+ model-path-prefix
+ (convert-descriptors input-descriptors)
+ (into-array contexts)
+ (util/->int-option epoch)))))
+ (create-image-classifier
+ [factory]
+ (create-image-classifier factory {}))
+ (create-image-classifier
+ [factory opts]
+ (let [{:keys [contexts epoch]
+ :or {contexts [(context/cpu)] epoch 0}} opts]
+ (->WrappedImageClassifier
+ (new ImageClassifier
+ model-path-prefix
+ (convert-descriptors input-descriptors)
+ (into-array contexts)
+ (util/->int-option epoch)))))
+ (create-object-detector
+ [factory]
+ (create-object-detector factory {}))
+ (create-object-detector
+ [factory opts]
+ (let [{:keys [contexts epoch]
+ :or {contexts [(context/cpu)] epoch 0}} opts]
+ (->WrappedObjectDetector
+ (new ObjectDetector
+ model-path-prefix
+ (convert-descriptors input-descriptors)
+ (into-array contexts)
+ (util/->int-option epoch))))))
+
+(s/def ::model-path-prefix string?)
+(s/def ::input-descriptors (s/coll-of ::mx-io/data-desc))
+
+(defn model-factory
+ "Creates a factory that can be used to instantiate an image classifier
+ predictor or object detector"
+ [model-path-prefix input-descriptors]
+ (util/validate! ::model-path-prefix model-path-prefix
+ "Invalid model path prefix")
+ (util/validate! ::input-descriptors input-descriptors
+ "Invalid input descriptors")
+ (->InferenceFactory model-path-prefix input-descriptors))
+
+(defn reshape-image
+ "Reshape an image to a new shape"
+ [image width height]
+ (util/validate! ::image image "Invalid image")
+ (util/validate! int? width "Invalid width")
+ (util/validate! int? height "Invalid height")
+ (ImageClassifier/reshapeImage image width height))
+
+(defn buffered-image-to-pixels
+ "Convert input BufferedImage to NDArray of input shape"
+ [image input-shape-vec]
+ (util/validate! ::image image "Invalid image")
+ (util/validate! (s/coll-of int?) input-shape-vec "Invalid shape vector")
+ (ImageClassifier/bufferedImageToPixels image (shape/->shape input-shape-vec) dtype/FLOAT32))
+
+(s/def ::image-path string?)
+(s/def ::image-paths (s/coll-of ::image-path))
+
+(defn load-image-from-file
+ "Loads an input image given a file name"
+ [image-path]
+ (util/validate! ::image-path image-path "Invalid image path")
+ (ImageClassifier/loadImageFromFile image-path))
+
+(defn load-image-paths
+ "Loads images from a list of file names"
+ [image-paths]
+ (util/validate! ::image-paths image-paths "Invalid image paths")
+ (ImageClassifier/loadInputBatch (util/convert-vector image-paths)))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/primitives.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/primitives.clj
new file mode 100644
index 000000000000..0967df2289d8
--- /dev/null
+++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/primitives.clj
@@ -0,0 +1,46 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns org.apache.clojure-mxnet.primitives
+ (:import (org.apache.mxnet MX_PRIMITIVES$MX_FLOAT MX_PRIMITIVES$MX_Double
+ MX_PRIMITIVES$MX_PRIMITIVE_TYPE)))
+
+
+;;; Defines custom MXNet primitives that can be used for mathematical computations
+;;; in NDArrays to control precision. Currently Float and Double are supported
+
+;;; For purposes of automatic conversion in ndarray functions, doubles are the default;
+;; to use single precision you must pass a Float explicitly
+
+(defn mx-float
+ "Creates a MXNet float primitive"
+ [num]
+ (new MX_PRIMITIVES$MX_FLOAT num))
+
+(defn mx-double
+ "Creates a MXNet double primitive"
+ [num]
+ (new MX_PRIMITIVES$MX_Double num))
+
+(defn ->num
+ "Returns the underlying number value"
+ [primitive]
+ (.data primitive))
+
+(defn primitive? [x]
+ (instance? MX_PRIMITIVES$MX_PRIMITIVE_TYPE x))
+
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj
index 6f22b0eb3a0f..43970c0abd79 100644
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj
+++ b/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj
@@ -19,6 +19,7 @@
(:require [clojure.spec.alpha :as s]
[t6.from-scala.core :refer [$ $$] :as $]
[clojure.string :as string]
+ [org.apache.clojure-mxnet.primitives :as primitives]
[org.apache.clojure-mxnet.shape :as mx-shape])
(:import (org.apache.mxnet NDArray)
(scala Product Tuple2 Tuple3)
@@ -36,7 +37,8 @@
"byte<>" "byte-array"
"java.lang.String<>" "vec-or-strings"
"org.apache.mxnet.NDArray" "ndarray"
- "org.apache.mxnet.Symbol" "sym"})
+ "org.apache.mxnet.Symbol" "sym"
+ "org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE" "double-or-float"})
(def symbol-param-coerce {"java.lang.String" "sym-name"
"float" "num"
@@ -66,6 +68,9 @@
(defn ->option [v]
($ Option v))
+(defn ->int-option [v]
+ (->option (when v (int v))))
+
(defn option->value [opt]
($/view opt))
@@ -141,6 +146,8 @@
(and (get targets "int<>") (vector? param)) (int-array param)
(and (get targets "float<>") (vector? param)) (float-array param)
(and (get targets "java.lang.String<>") (vector? param)) (into-array param)
+ (and (get targets "org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE") (instance? Float param)) (primitives/mx-float param)
+ (and (get targets "org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE") (number? param)) (primitives/mx-double param)
:else param))
(defn nil-or-coerce-param [param targets]
@@ -174,8 +181,15 @@
(instance? Map return-val) (scala-map->map return-val)
(instance? Tuple2 return-val) (tuple->vec return-val)
(instance? Tuple3 return-val) (tuple->vec return-val)
+ (primitives/primitive? return-val) (primitives/->num return-val)
:else return-val))
+(defn coerce-return-recursive [return-val]
+ (let [coerced-val (coerce-return return-val)]
+ (if (vector? coerced-val)
+ (into [] (map coerce-return-recursive coerced-val))
+ coerced-val)))
+
(defmacro scala-fn
"Creates a scala fn from an anonymous clojure fn of the form (fn [x] body)"
[f]
diff --git a/contrib/clojure-package/test/good-test-ndarray.clj b/contrib/clojure-package/test/good-test-ndarray.clj
index 3b53b1906006..b048a819c642 100644
--- a/contrib/clojure-package/test/good-test-ndarray.clj
+++ b/contrib/clojure-package/test/good-test-ndarray.clj
@@ -27,11 +27,12 @@
(defn
div
- ([ndarray num-or-ndarray]
+ ([ndarray ndarray-or-double-or-float]
(util/coerce-return
(.$div
ndarray
(util/coerce-param
- num-or-ndarray
- #{"float" "org.apache.mxnet.NDArray"})))))
+ ndarray-or-double-or-float
+ #{"org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE"
+ "org.apache.mxnet.NDArray"})))))
diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/imageclassifier_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/imageclassifier_test.clj
new file mode 100644
index 000000000000..b459b06132b2
--- /dev/null
+++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/imageclassifier_test.clj
@@ -0,0 +1,76 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns org.apache.clojure-mxnet.infer.imageclassifier-test
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [clojure.test :refer :all]))
+
+(def model-dir "data/")
+(def model-path-prefix (str model-dir "resnet-18/resnet-18"))
+
+(when-not (.exists (io/file (str model-path-prefix "-symbol.json")))
+ (sh "./scripts/infer/get_resnet_18_data.sh"))
+
+(defn create-classifier []
+ (let [descriptors [{:name "data"
+ :shape [1 3 224 224]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)]
+ (infer/create-image-classifier factory)))
+
+(deftest test-single-classification
+ (let [classifier (create-classifier)
+ image (infer/load-image-from-file "test/test-images/kitten.jpg")
+ [predictions-all] (infer/classify-image classifier image)
+ [predictions-with-default-dtype] (infer/classify-image classifier image 10)
+ [predictions] (infer/classify-image classifier image 5 dtype/FLOAT32)]
+ (is (= 1000 (count predictions-all)))
+ (is (= 10 (count predictions-with-default-dtype)))
+ (is (some? predictions))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(float? (second %)) predictions))
+ (is (every? #(< 0 (second %) 1) predictions))
+ (is (= ["n02123159 tiger cat"
+ "n02124075 Egyptian cat"
+ "n02123045 tabby, tabby cat"
+ "n02127052 lynx, catamount"
+ "n02128757 snow leopard, ounce, Panthera uncia"]
+ (map first predictions)))))
+
+(deftest test-batch-classification
+ (let [classifier (create-classifier)
+ image-batch (infer/load-image-paths ["test/test-images/kitten.jpg"
+ "test/test-images/Pug-Cookie.jpg"])
+ batch-predictions-all (infer/classify-image-batch classifier image-batch)
+ batch-predictions-with-default-dtype (infer/classify-image-batch classifier image-batch 10)
+ batch-predictions (infer/classify-image-batch classifier image-batch 5 dtype/FLOAT32)
+ predictions (first batch-predictions)]
+ (is (= 1000 (count (first batch-predictions-all))))
+ (is (= 10 (count (first batch-predictions-with-default-dtype))))
+ (is (some? batch-predictions))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(float? (second %)) predictions))
+ (is (every? #(< 0 (second %) 1) predictions))))
diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/objectdetector_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/objectdetector_test.clj
new file mode 100644
index 000000000000..3a0e3d30a1d9
--- /dev/null
+++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/objectdetector_test.clj
@@ -0,0 +1,67 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns org.apache.clojure-mxnet.infer.objectdetector-test
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [clojure.test :refer :all]))
+
+(def model-dir "data/")
+(def model-path-prefix (str model-dir "resnet50_ssd/resnet50_ssd_model"))
+
+(when-not (.exists (io/file (str model-path-prefix "-symbol.json")))
+ (sh "./scripts/infer/get_ssd_data.sh"))
+
+(defn create-detector []
+ (let [descriptors [{:name "data"
+ :shape [1 3 512 512]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)]
+ (infer/create-object-detector factory)))
+
+(deftest test-single-detection
+ (let [detector (create-detector)
+ image (infer/load-image-from-file "test/test-images/kitten.jpg")
+ [predictions-all] (infer/detect-objects detector image)
+ [predictions] (infer/detect-objects detector image 5)]
+ (is (some? predictions))
+ (is (= 5 (count predictions)))
+ (is (= 13 (count predictions-all)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(= 5 (count (second %))) predictions))
+ (is (every? #(< 0 (first (second %)) 1) predictions))
+ (is (= "cat" (first (first predictions))))))
+
+(deftest test-batch-detection
+ (let [detector (create-detector)
+ image-batch (infer/load-image-paths ["test/test-images/kitten.jpg"
+ "test/test-images/Pug-Cookie.jpg"])
+ batch-predictions-all (infer/detect-objects-batch detector image-batch)
+ batch-predictions (infer/detect-objects-batch detector image-batch 5)
+ predictions (first batch-predictions)]
+ (is (some? batch-predictions))
+ (is (= 13 (count (first batch-predictions-all))))
+ (is (= 5 (count predictions)))
+ (is (every? #(= 2 (count %)) predictions))
+ (is (every? #(string? (first %)) predictions))
+ (is (every? #(= 5 (count (second %))) predictions))
+ (is (every? #(< 0 (first (second %)) 1) predictions))))
diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/predictor_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/predictor_test.clj
new file mode 100644
index 000000000000..0e7532bc2258
--- /dev/null
+++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/predictor_test.clj
@@ -0,0 +1,59 @@
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns org.apache.clojure-mxnet.infer.predictor-test
+ (:require [org.apache.clojure-mxnet.context :as context]
+ [org.apache.clojure-mxnet.dtype :as dtype]
+ [org.apache.clojure-mxnet.infer :as infer]
+ [org.apache.clojure-mxnet.layout :as layout]
+ [org.apache.clojure-mxnet.ndarray :as ndarray]
+ [org.apache.clojure-mxnet.shape :as shape]
+ [clojure.java.io :as io]
+ [clojure.java.shell :refer [sh]]
+ [clojure.string :refer [split]]
+ [clojure.test :refer :all]))
+
+(def model-dir "data/")
+(def model-path-prefix (str model-dir "resnet-18/resnet-18"))
+(def width 224)
+(def height 224)
+
+(when-not (.exists (io/file (str model-path-prefix "-symbol.json")))
+ (sh "./scripts/infer/get_resnet_18_data.sh"))
+
+(defn create-predictor []
+ (let [descriptors [{:name "data"
+ :shape [1 3 height width]
+ :layout layout/NCHW
+ :dtype dtype/FLOAT32}]
+ factory (infer/model-factory model-path-prefix descriptors)]
+ (infer/create-predictor factory)))
+
+(deftest predictor-test
+ (let [predictor (create-predictor)
+ image-ndarray (-> "test/test-images/kitten.jpg"
+ infer/load-image-from-file
+ (infer/reshape-image width height)
+ (infer/buffered-image-to-pixels [3 width height])
+ (ndarray/expand-dims 0))
+ [predictions] (infer/predict-with-ndarray predictor [image-ndarray])
+ synset-file (-> (io/file model-path-prefix)
+ (.getParent)
+ (io/file "synset.txt"))
+ synset-names (split (slurp synset-file) #"\n")
+ [best-index] (ndarray/->int-vec (ndarray/argmax predictions 1))
+ best-prediction (synset-names best-index)]
+ (is (= "n02123159 tiger cat" best-prediction))))
diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj
index 79e94412d0df..9ffd3abed2f9 100644
--- a/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj
+++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj
@@ -97,7 +97,7 @@
(is (= [1.0 1.0] (->vec ndhalves)))))
(deftest test-full
- (let [nda (full [1 2] 3)]
+ (let [nda (full [1 2] 3.0)]
(is (= (shape nda) (mx-shape/->shape [1 2])))
(is (= [3.0 3.0] (->vec nda)))))
diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/primitives_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/primitives_test.clj
new file mode 100644
index 000000000000..1a538e537b8b
--- /dev/null
+++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/primitives_test.clj
@@ -0,0 +1,45 @@
+;;
+;; Licensed to the Apache Software Foundation (ASF) under one or more
+;; contributor license agreements. See the NOTICE file distributed with
+;; this work for additional information regarding copyright ownership.
+;; The ASF licenses this file to You under the Apache License, Version 2.0
+;; (the "License"); you may not use this file except in compliance with
+;; the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+(ns org.apache.clojure-mxnet.primitives-test
+ (:require [org.apache.clojure-mxnet.primitives :as primitives]
+ [clojure.test :refer :all])
+ (:import (org.apache.mxnet MX_PRIMITIVES$MX_PRIMITIVE_TYPE
+ MX_PRIMITIVES$MX_FLOAT
+ MX_PRIMITIVES$MX_Double)))
+
+(deftest test-primitive-types
+ (is (not (primitives/primitive? 3)))
+ (is (primitives/primitive? (primitives/mx-float 3)))
+ (is (primitives/primitive? (primitives/mx-double 3))))
+
+(deftest test-float-primitives
+ (is (instance? MX_PRIMITIVES$MX_PRIMITIVE_TYPE (primitives/mx-float 3)))
+ (is (instance? MX_PRIMITIVES$MX_FLOAT (primitives/mx-float 3)))
+ (is (instance? Float (-> (primitives/mx-float 3)
+ (primitives/->num))))
+ (is (= 3.0 (-> (primitives/mx-float 3)
+ (primitives/->num)))))
+
+(deftest test-double-primitives
+ (is (instance? MX_PRIMITIVES$MX_PRIMITIVE_TYPE (primitives/mx-double 2)))
+ (is (instance? MX_PRIMITIVES$MX_Double (primitives/mx-double 2)))
+ (is (instance? Double (-> (primitives/mx-double 2)
+ (primitives/->num))))
+ (is (= 2.0 (-> (primitives/mx-double 2)
+ (primitives/->num)))))
+
diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj
index ee7710317e4c..c26f83d5aa49 100644
--- a/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj
+++ b/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj
@@ -20,6 +20,7 @@
[org.apache.clojure-mxnet.shape :as mx-shape]
[org.apache.clojure-mxnet.util :as util]
[org.apache.clojure-mxnet.ndarray :as ndarray]
+ [org.apache.clojure-mxnet.primitives :as primitives]
[org.apache.clojure-mxnet.symbol :as sym]
[org.apache.clojure-mxnet.test-util :as test-util]
[clojure.spec.alpha :as s])
@@ -54,6 +55,16 @@
(is (instance? Option x))
(is (= 1 (.get x)))))
+(deftest test->int-option
+ (let [x (util/->int-option 4.5)]
+ (is (instance? Option x))
+ (is (= 4 (.get x)))))
+
+(deftest test-empty->int-option
+ (let [x (util/->int-option nil)]
+ (is (instance? Option x))
+ (is (.isEmpty x))))
+
(deftest test-option->value
(is (= 2 (-> (util/->option 2)
(util/option->value)))))
@@ -123,6 +134,9 @@
(is (= "[F" (->> (util/coerce-param [1 2] #{"float<>"}) str (take 2) (apply str))))
(is (= "[L" (->> (util/coerce-param [1 2] #{"java.lang.String<>"}) str (take 2) (apply str))))
+ (is (primitives/primitive? (util/coerce-param 1.0 #{"org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE"})))
+ (is (primitives/primitive? (util/coerce-param (float 1.0) #{"org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE"})))
+
(is (= 1 (util/coerce-param 1 #{"unknown"}))))
(deftest test-nil-or-coerce-param
@@ -161,6 +175,12 @@
(util/convert-tuple [1 2]))))
(is (= [1 2 3] (util/coerce-return
(util/convert-tuple [1 2 3]))))
+
+ (is (instance? Double (util/coerce-return (primitives/mx-double 3))))
+ (is (= 3.0 (util/coerce-return (primitives/mx-double 3))))
+ (is (instance? Float (util/coerce-return (primitives/mx-float 2))))
+ (is (= 2.0 (util/coerce-return (primitives/mx-float 2))))
+
(is (= "foo" (util/coerce-return "foo"))))
(deftest test-translate-keyword-shape
diff --git a/contrib/clojure-package/test/test-images/Pug-Cookie.jpg b/contrib/clojure-package/test/test-images/Pug-Cookie.jpg
new file mode 100644
index 000000000000..56f5dc16ed7a
Binary files /dev/null and b/contrib/clojure-package/test/test-images/Pug-Cookie.jpg differ
diff --git a/contrib/clojure-package/test/test-images/kitten.jpg b/contrib/clojure-package/test/test-images/kitten.jpg
new file mode 100644
index 000000000000..ffcd2be2c674
Binary files /dev/null and b/contrib/clojure-package/test/test-images/kitten.jpg differ
diff --git a/cpp-package/CMakeLists.txt b/cpp-package/CMakeLists.txt
index f7fbc77e1a5e..5d2977279d74 100644
--- a/cpp-package/CMakeLists.txt
+++ b/cpp-package/CMakeLists.txt
@@ -20,4 +20,6 @@ if(USE_CPP_PACKAGE)
add_subdirectory(example)
endif()
+ install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+
endif()
diff --git a/cpp-package/example/README.md b/cpp-package/example/README.md
index c7223e94c920..c2329330b6be 100644
--- a/cpp-package/example/README.md
+++ b/cpp-package/example/README.md
@@ -2,7 +2,8 @@
## Building C++ examples
-The examples are built while building the MXNet library and cpp-package from source . However, they can be built manually as follows
+The examples in this folder demonstrate the **training** workflow. The **inference workflow** related examples can be found in the [inference](https://github.com/apache/incubator-mxnet/blob/master/cpp-package/example/inference) folder.
+The examples in this folder are built while building the MXNet library and cpp-package from source. However, they can be built manually as follows
From cpp-package/examples directory
@@ -18,7 +19,7 @@ The examples that are built to be run on GPU may not work on the non-GPU machine
The makefile will also download the necessary data files and store in a data folder. (The download will take couple of minutes, but will be done only once on a fresh installation.)
-## Examples
+## Examples demonstrating training workflow
This directory contains following examples. In order to run the examples, ensure that the path to the MXNet shared library is added to the OS specific environment variable viz. **LD\_LIBRARY\_PATH** for Linux, Mac and Ubuntu OS and **PATH** for Windows OS. For example `export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/home/ubuntu/incubator-mxnet/lib` on ubuntu using gpu.
diff --git a/cpp-package/example/inference/Makefile b/cpp-package/example/inference/Makefile
new file mode 100644
index 000000000000..5efe6cfb68e5
--- /dev/null
+++ b/cpp-package/example/inference/Makefile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+CPPEX_SRC = $(wildcard *.cpp)
+CPPEX_EXE = $(patsubst %.cpp, %, $(CPPEX_SRC))
+OPENCV_CFLAGS=`pkg-config --cflags opencv`
+OPENCV_LDFLAGS=`pkg-config --libs opencv`
+
+CXX=g++
+
+
+CFLAGS=$(COMMFLAGS) -I../../../3rdparty/tvm/nnvm/include -I../../../3rdparty/dmlc-core/include -I ../../include -I ../../../include -Wall -O3 -msse3 -funroll-loops -Wno-unused-parameter -Wno-unknown-pragmas
+CPPEX_EXTRA_LDFLAGS := -L../../../lib -lmxnet $(OPENCV_LDFLAGS)
+
+all: $(CPPEX_EXE)
+
+debug: CPPEX_CFLAGS += -DDEBUG -g
+debug: all
+
+
+$(CPPEX_EXE):% : %.cpp
+ $(CXX) -std=c++0x $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS)
+
+clean:
+ rm -f $(CPPEX_EXE)
diff --git a/cpp-package/example/inference/README.md b/cpp-package/example/inference/README.md
new file mode 100644
index 000000000000..79831b40b6bd
--- /dev/null
+++ b/cpp-package/example/inference/README.md
@@ -0,0 +1,41 @@
+# MXNet C++ Package Inference Workflow Examples
+
+## Building C++ Inference examples
+
+The examples in this folder demonstrate the **inference** workflow.
+To build examples use following commands:
+
+- Release: **make all**
+- Debug: **make debug all**
+
+
+## Examples demonstrating inference workflow
+
+This directory contains following examples. In order to run the examples, ensure that the path to the MXNet shared library is added to the OS specific environment variable viz. **LD\_LIBRARY\_PATH** for Linux, Mac and Ubuntu OS and **PATH** for Windows OS.
+
+### [inception_inference.cpp](https://github.com/apache/incubator-mxnet/blob/master/cpp-package/example/inference/inception_inference.cpp)
+
+This example demonstrates image classification workflow with pre-trained models using MXNet C++ API. The command line parameters the example can accept are as shown below:
+
+```
+./inception_inference --help
+Usage:
+inception_inference --symbol
+ --params
+ --image ) downloads the pre-trained **Inception** model and a test image. The users can invoke this script as follows:
+
+```
+./unit_test_inception_inference.sh
+```
diff --git a/cpp-package/example/inference/inception_inference.cpp b/cpp-package/example/inference/inception_inference.cpp
new file mode 100644
index 000000000000..7005e745b2f4
--- /dev/null
+++ b/cpp-package/example/inference/inception_inference.cpp
@@ -0,0 +1,446 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * This example demonstrates image classification workflow with pre-trained models using MXNet C++ API.
+ * The example performs following tasks.
+ * 1. Load the pre-trained model.
+ * 2. Load the parameters of pre-trained model.
+ * 3. Load the image to be classified in to NDArray.
+ * 4. Normalize the image using the mean of images that were used for training.
+ * 5. Run the forward pass and predict the input image.
+ */
+
+#include
+#include
+#include
+#include
* @param resizedImage BufferedImage to get pixels from
+ *
* @param inputImageShape Input shape; for example for resnet it is (3,224,224).
Should be same as inputDescriptor shape.
+ * @param dType The DataType of the NDArray created from the image
+ * that should be returned.
+ * Currently it defaults to Dtype.Float32
* @return NDArray pixels array with shape (3, 224, 224) in CHW format
*/
- def bufferedImageToPixels(resizedImage: BufferedImage, inputImageShape: Shape): NDArray = {
+ def bufferedImageToPixels(resizedImage: BufferedImage, inputImageShape: Shape,
+ dType : DType = DType.Float32): NDArray = {
+
+ if (dType == DType.Float64) {
+ val result = getFloatPixelsArray(resizedImage)
+ NDArray.array(result.map(_.toDouble), shape = inputImageShape)
+ }
+ else {
+ val result = getFloatPixelsArray(resizedImage)
+ NDArray.array(result, shape = inputImageShape)
+ }
+ }
+
+ private def getFloatPixelsArray(resizedImage: BufferedImage): Array[Float] = {
+
// Get height and width of the image
val w = resizedImage.getWidth()
val h = resizedImage.getHeight()
@@ -166,7 +192,6 @@ object ImageClassifier {
// 3 times height and width for R,G,B channels
val result = new Array[Float](3 * h * w)
-
var row = 0
// copy pixels to array vertically
while (row < h) {
@@ -184,11 +209,10 @@ object ImageClassifier {
}
row += 1
}
+
resizedImage.flush()
- // creating NDArray according to the input shape
- val pixelsArray = NDArray.array(result, shape = inputImageShape)
- pixelsArray
+ result
}
/**
diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala
index d4bce9f0d71e..67692a316cc4 100644
--- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala
+++ b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala
@@ -17,8 +17,9 @@
package org.apache.mxnet.infer
+import org.apache.mxnet.MX_PRIMITIVES.MX_PRIMITIVE_TYPE
import org.apache.mxnet.io.NDArrayIter
-import org.apache.mxnet.{Context, DataDesc, NDArray, Shape}
+import org.apache.mxnet._
import org.apache.mxnet.module.Module
import scala.collection.mutable.ListBuffer
@@ -36,11 +37,13 @@ private[infer] trait PredictBase {
*
* This method will take input as IndexedSeq one dimensional arrays and creates the
* NDArray needed for inference. The array will be reshaped based on the input descriptors.
- * @param input: An IndexedSequence of a one-dimensional array.
+ * @param input: An Indexed Sequence of a one-dimensional array of datatype
+ * Float or Double
An IndexedSequence is needed when the model has more than one input.
* @return Indexed sequence array of outputs
*/
- def predict(input: IndexedSeq[Array[Float]]): IndexedSeq[Array[Float]]
+ def predict[@specialized (Base.MX_PRIMITIVES) T](input: IndexedSeq[Array[T]])
+ : IndexedSeq[Array[T]]
/**
* Predict using NDArray as input.
@@ -123,13 +126,13 @@ class Predictor(modelPathPrefix: String,
* Takes input as IndexedSeq one dimensional arrays and creates the NDArray needed for inference
* The array will be reshaped based on the input descriptors.
*
- * @param input: An IndexedSequence of a one-dimensional array.
+ * @param input: An IndexedSequence of a one-dimensional array
+ * of data type Float or Double.
An IndexedSequence is needed when the model has more than one input.
* @return Indexed sequence array of outputs
*/
- override def predict(input: IndexedSeq[Array[Float]])
- : IndexedSeq[Array[Float]] = {
-
+ override def predict[@specialized (Base.MX_PRIMITIVES) T](input: IndexedSeq[Array[T]])
+ : IndexedSeq[Array[T]] = {
require(input.length == inputDescriptors.length,
s"number of inputs provided: ${input.length} does not match number of inputs " +
s"in inputDescriptors: ${inputDescriptors.length}")
@@ -139,12 +142,30 @@ class Predictor(modelPathPrefix: String,
s"number of elements:${i.length} in the input does not match the shape:" +
s"${d.shape.toString()}")
}
+
+ // Infer the dtype of input and call relevant method
+ val result = input(0)(0) match {
+ case d: Double => predictImpl(input.asInstanceOf[IndexedSeq[Array[Double]]])
+ case _ => predictImpl(input.asInstanceOf[IndexedSeq[Array[Float]]])
+ }
+
+ result.asInstanceOf[IndexedSeq[Array[T]]]
+ }
+
+ private def predictImpl[B, A <: MX_PRIMITIVE_TYPE]
+ (input: IndexedSeq[Array[B]])(implicit ev: B => A)
+ : IndexedSeq[Array[B]] = {
+
var inputND: ListBuffer[NDArray] = ListBuffer.empty[NDArray]
for((i, d) <- input.zip(inputDescriptors)) {
val shape = d.shape.toVector.patch(from = batchIndex, patch = Vector(1), replaced = 1)
-
- inputND += mxNetHandler.execute(NDArray.array(i, Shape(shape)))
+ if (d.dtype == DType.Float64) {
+ inputND += mxNetHandler.execute(NDArray.array(i.asInstanceOf[Array[Double]], Shape(shape)))
+ }
+ else {
+ inputND += mxNetHandler.execute(NDArray.array(i.asInstanceOf[Array[Float]], Shape(shape)))
+ }
}
// rebind with batchsize 1
@@ -158,7 +179,8 @@ class Predictor(modelPathPrefix: String,
val resultND = mxNetHandler.execute(mod.predict(new NDArrayIter(
inputND.toIndexedSeq, dataBatchSize = 1)))
- val result = resultND.map((f : NDArray) => f.toArray)
+ val result =
+ resultND.map((f : NDArray) => if (f.dtype == DType.Float64) f.toFloat64Array else f.toArray)
mxNetHandler.execute(inputND.foreach(_.dispose))
mxNetHandler.execute(resultND.foreach(_.dispose))
@@ -168,9 +190,11 @@ class Predictor(modelPathPrefix: String,
mxNetHandler.execute(mod.bind(inputDescriptors, forTraining = false, forceRebind = true))
}
- result
+ result.asInstanceOf[IndexedSeq[Array[B]]]
}
+
+
/**
* Predict using NDArray as input
* This method is useful when the input is a batch of data
diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala
index 13369c8fcef5..5a6ac7599fa9 100644
--- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala
+++ b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala
@@ -17,18 +17,55 @@
package org.apache.mxnet.infer.javaapi
+/**
+ * The ObjectDetectorOutput class is a simple POJO helper class that is used to simplify
+ * the interactions with ObjectDetector predict results. The class stores the bounding box
+ * coordinates, name of preicted class, and the probability.
+ */
+
+
class ObjectDetectorOutput (className: String, args: Array[Float]){
+ /**
+ * Gets the predicted class's name.
+ *
+ * @return String representing the name of the predicted class
+ */
def getClassName: String = className
+ /**
+ * Gets the probability of the predicted class.
+ *
+ * @return Float representing the probability of predicted class
+ */
def getProbability: Float = args(0)
+ /**
+ * Gets the minimum X coordinate for the bounding box containing the predicted object.
+ *
+ * @return Float of the min X coordinate for the object bounding box
+ */
def getXMin: Float = args(1)
+ /**
+ * Gets the maximum X coordinate for the bounding box containing the predicted object.
+ *
+ * @return Float of the max X coordinate for the object bounding box
+ */
def getXMax: Float = args(2)
+ /**
+ * Gets the minimum Y coordinate for the bounding box containing the predicted object.
+ *
+ * @return Float of the min Y coordinate for the object bounding box
+ */
def getYMin: Float = args(3)
+ /**
+ * Gets the maximum Y coordinate for the bounding box containing the predicted object.
+ *
+ * @return Float of the max Y coordinate for the object bounding box
+ */
def getYMax: Float = args(4)
}
diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala
index 0466693be9bc..146fe93105e4 100644
--- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala
+++ b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala
@@ -72,6 +72,30 @@ class Predictor private[mxnet] (val predictor: org.apache.mxnet.infer.Predictor)
predictor.predict(input).toArray
}
+ /**
+ * Takes input as Array of one dimensional arrays and creates the NDArray needed for inference
+ * The array will be reshaped based on the input descriptors. Example of calling in Java:
+ *
+ *
+ * {@code
+ * double tmp[][] = new double[1][224];
+ * for (int x = 0; x < 1; x++)
+ * for (int y = 0; y < 224; y++)
+ * tmp[x][y] = (int)(Math.random()*10);
+ * predictor.predict(tmp);
+ * }
+ *
+ *
+ * @param input: An Array of a one-dimensional array.
+ An extra Array is needed for when the model has more than one input.
+ * @return Indexed sequence array of outputs
+ */
+
+ def predict(input: Array[Array[Double]]):
+ Array[Array[Double]] = {
+ predictor.predict(input).toArray
+ }
+
/**
* Takes input as List of one dimensional arrays and creates the NDArray needed for inference
* The array will be reshaped based on the input descriptors.
diff --git a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorOutputTest.java b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorOutputTest.java
new file mode 100644
index 000000000000..04041fcda9bf
--- /dev/null
+++ b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorOutputTest.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.mxnet.infer.javaapi;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class ObjectDetectorOutputTest {
+
+ private String predictedClassName = "lion";
+
+ private float delta = 0.00001f;
+
+ @Test
+ public void testConstructor() {
+
+ float[] arr = new float[]{0f, 1f, 2f, 3f, 4f};
+
+ ObjectDetectorOutput odOutput = new ObjectDetectorOutput(predictedClassName, arr);
+
+ Assert.assertEquals(odOutput.getClassName(), predictedClassName);
+ Assert.assertEquals("Threshold not matching", odOutput.getProbability(), 0f, delta);
+ Assert.assertEquals("Threshold not matching", odOutput.getXMin(), 1f, delta);
+ Assert.assertEquals("Threshold not matching", odOutput.getXMax(), 2f, delta);
+ Assert.assertEquals("Threshold not matching", odOutput.getYMin(), 3f, delta);
+ Assert.assertEquals("Threshold not matching", odOutput.getYMax(), 4f, delta);
+
+ }
+
+ @Test (expected = ArrayIndexOutOfBoundsException.class)
+ public void testIncompleteArgsConstructor() {
+
+ float[] arr = new float[]{0f, 1f};
+
+ ObjectDetectorOutput odOutput = new ObjectDetectorOutput(predictedClassName, arr);
+
+ Assert.assertEquals(odOutput.getClassName(), predictedClassName);
+ Assert.assertEquals("Threshold not matching", odOutput.getProbability(), 0f, delta);
+ Assert.assertEquals("Threshold not matching", odOutput.getXMin(), 1f, delta);
+
+ // This is where exception will be thrown
+ odOutput.getXMax();
+ }
+}
diff --git a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorTest.java b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorTest.java
new file mode 100644
index 000000000000..a5e64911d141
--- /dev/null
+++ b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.mxnet.infer.javaapi;
+
+import org.apache.mxnet.Layout;
+import org.apache.mxnet.javaapi.DType;
+import org.apache.mxnet.javaapi.DataDesc;
+import org.apache.mxnet.javaapi.NDArray;
+import org.apache.mxnet.javaapi.Shape;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.awt.image.BufferedImage;
+import java.util.ArrayList;
+import java.util.List;
+
+public class ObjectDetectorTest {
+
+ private List inputDesc;
+ private BufferedImage inputImage;
+
+ private List> expectedResult;
+
+ private ObjectDetector objectDetector;
+
+ private int batchSize = 1;
+
+ private int channels = 3;
+
+ private int imageHeight = 512;
+
+ private int imageWidth = 512;
+
+ private String dataName = "data";
+
+ private int topK = 5;
+
+ private String predictedClassName = "lion"; // Random string
+
+ private Shape getTestShape() {
+
+ return new Shape(new int[] {batchSize, channels, imageHeight, imageWidth});
+ }
+
+ @Before
+ public void setUp() {
+
+ inputDesc = new ArrayList<>();
+ inputDesc.add(new DataDesc(dataName, getTestShape(), DType.Float32(), Layout.NCHW()));
+ inputImage = new BufferedImage(imageWidth, imageHeight, BufferedImage.TYPE_INT_RGB);
+ objectDetector = Mockito.mock(ObjectDetector.class);
+ expectedResult = new ArrayList<>();
+ expectedResult.add(new ArrayList());
+ expectedResult.get(0).add(new ObjectDetectorOutput(predictedClassName, new float[]{}));
+ }
+
+ @Test
+ public void testObjectDetectorWithInputImage() {
+
+ Mockito.when(objectDetector.imageObjectDetect(inputImage, topK)).thenReturn(expectedResult);
+ List> actualResult = objectDetector.imageObjectDetect(inputImage, topK);
+ Mockito.verify(objectDetector, Mockito.times(1)).imageObjectDetect(inputImage, topK);
+ Assert.assertEquals(expectedResult, actualResult);
+ }
+
+
+ @Test
+ public void testObjectDetectorWithBatchImage() {
+
+ List batchImage = new ArrayList<>();
+ batchImage.add(inputImage);
+ Mockito.when(objectDetector.imageBatchObjectDetect(batchImage, topK)).thenReturn(expectedResult);
+ List> actualResult = objectDetector.imageBatchObjectDetect(batchImage, topK);
+ Mockito.verify(objectDetector, Mockito.times(1)).imageBatchObjectDetect(batchImage, topK);
+ Assert.assertEquals(expectedResult, actualResult);
+ }
+
+ @Test
+ public void testObjectDetectorWithNDArrayInput() {
+
+ NDArray inputArr = ObjectDetector.bufferedImageToPixels(inputImage, getTestShape());
+ List inputL = new ArrayList<>();
+ inputL.add(inputArr);
+ Mockito.when(objectDetector.objectDetectWithNDArray(inputL, 5)).thenReturn(expectedResult);
+ List> actualResult = objectDetector.objectDetectWithNDArray(inputL, topK);
+ Mockito.verify(objectDetector, Mockito.times(1)).objectDetectWithNDArray(inputL, topK);
+ Assert.assertEquals(expectedResult, actualResult);
+ }
+}
diff --git a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/PredictorTest.java b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/PredictorTest.java
new file mode 100644
index 000000000000..e7a6c9652346
--- /dev/null
+++ b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/PredictorTest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.mxnet.infer.javaapi;
+
+import org.apache.mxnet.javaapi.Context;
+import org.apache.mxnet.javaapi.NDArray;
+import org.apache.mxnet.javaapi.Shape;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class PredictorTest {
+
+ Predictor mockPredictor;
+
+ @Before
+ public void setUp() {
+ mockPredictor = Mockito.mock(Predictor.class);
+ }
+
+ @Test
+ public void testPredictWithFloatArray() {
+
+ float tmp[][] = new float[1][224];
+ for (int x = 0; x < 1; x++) {
+ for (int y = 0; y < 224; y++)
+ tmp[x][y] = (int) (Math.random() * 10);
+ }
+
+ float [][] expectedResult = new float[][] {{1f, 2f}};
+ Mockito.when(mockPredictor.predict(tmp)).thenReturn(expectedResult);
+ float[][] actualResult = mockPredictor.predict(tmp);
+
+ Mockito.verify(mockPredictor, Mockito.times(1)).predict(tmp);
+ Assert.assertArrayEquals(expectedResult, actualResult);
+ }
+
+ @Test
+ public void testPredictWithNDArray() {
+
+ float[] tmpArr = new float[224];
+ for (int y = 0; y < 224; y++)
+ tmpArr[y] = (int) (Math.random() * 10);
+
+ NDArray arr = new org.apache.mxnet.javaapi.NDArray(tmpArr, new Shape(new int[] {1, 1, 1, 224}), new Context("cpu", 0));
+
+ List inputList = new ArrayList<>();
+ inputList.add(arr);
+
+ NDArray expected = new NDArray(tmpArr, new Shape(new int[] {1, 1, 1, 224}), new Context("cpu", 0));
+ List expectedResult = new ArrayList<>();
+ expectedResult.add(expected);
+
+ Mockito.when(mockPredictor.predictWithNDArray(inputList)).thenReturn(expectedResult);
+
+ List actualOutput = mockPredictor.predictWithNDArray(inputList);
+
+ Mockito.verify(mockPredictor, Mockito.times(1)).predictWithNDArray(inputList);
+
+ Assert.assertEquals(expectedResult, actualOutput);
+ }
+
+ @Test
+ public void testPredictWithListOfFloatsAsInput() {
+ List> input = new ArrayList<>();
+
+ input.add(Arrays.asList(new Float[] {1f, 2f}));
+
+ List> expectedOutput = new ArrayList<>(input);
+
+ Mockito.when(mockPredictor.predict(input)).thenReturn(expectedOutput);
+
+ List> actualOutput = mockPredictor.predict(input);
+
+ Mockito.verify(mockPredictor, Mockito.times(1)).predict(input);
+
+ Assert.assertEquals(expectedOutput, actualOutput);
+
+ }
+}
\ No newline at end of file
diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala
index b28aeba1deed..d9ccec468791 100644
--- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala
+++ b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala
@@ -22,7 +22,7 @@ import java.nio.file.{Files, Paths}
import java.util
import org.apache.mxnet.module.Module
-import org.apache.mxnet.{Context, DataDesc, NDArray, Shape}
+import org.apache.mxnet.{Context, DType, DataDesc, NDArray, Shape}
import org.mockito.Matchers._
import org.mockito.Mockito
import org.scalatest.{BeforeAndAfterAll, FunSuite}
@@ -127,6 +127,29 @@ class ClassifierSuite extends FunSuite with BeforeAndAfterAll {
}
+ test("ClassifierSuite-flatFloat64Array-topK") {
+ val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2)))
+ val inputData = Array.fill[Double](12)(1d)
+
+ val predictResult : IndexedSeq[Array[Double]] =
+ IndexedSeq[Array[Double]](Array(.98d, 0.97d, 0.96d, 0.99d))
+
+ val testClassifier = new MyClassifier(modelPath, inputDescriptor)
+
+ Mockito.doReturn(predictResult).when(testClassifier.predictor)
+ .predict(any(classOf[IndexedSeq[Array[Double]]]))
+
+ val result: IndexedSeq[(String, Double)] = testClassifier.
+ classify(IndexedSeq(inputData), topK = Some(10))
+
+ assert((result(0)_2).getClass == 1d.getClass)
+
+ assertResult(predictResult(0).sortBy(-_)) {
+ result.map(_._2).toArray
+ }
+
+ }
+
test("ClassifierSuite-flatArrayInput") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2)))
val inputData = Array.fill[Float](12)(1)
@@ -147,6 +170,28 @@ class ClassifierSuite extends FunSuite with BeforeAndAfterAll {
}
}
+ test("ClassifierSuite-flatArrayFloat64Input") {
+ val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2)))
+ val inputData = Array.fill[Double](12)(1d)
+
+ val predictResult : IndexedSeq[Array[Double]] =
+ IndexedSeq[Array[Double]](Array(.98d, 0.97d, 0.96d, 0.99d))
+
+ val testClassifier = new MyClassifier(modelPath, inputDescriptor)
+
+ Mockito.doReturn(predictResult).when(testClassifier.predictor)
+ .predict(any(classOf[IndexedSeq[Array[Double]]]))
+
+ val result: IndexedSeq[(String, Double)] = testClassifier.
+ classify(IndexedSeq(inputData))
+
+ assert((result(0)_2).getClass == 1d.getClass)
+
+ assertResult(predictResult(0)) {
+ result.map(_._2).toArray
+ }
+ }
+
test("ClassifierSuite-NDArray1InputWithoutTopK") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2)))
val inputDataShape = Shape(1, 3, 2, 2)
diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala
index 1c291e1e7b3c..5198c4a1f309 100644
--- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala
+++ b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala
@@ -68,6 +68,10 @@ class ImageClassifierSuite extends ClassifierSuite with BeforeAndAfterAll {
val result = ImageClassifier.bufferedImageToPixels(image2, Shape(3, 2, 2))
assert(result.shape == inputDescriptor(0).shape.drop(1))
+ assert(result.dtype == DType.Float32)
+
+ val resultFloat64 = ImageClassifier.bufferedImageToPixels(image2, Shape(3, 2, 2), DType.Float64)
+ assert(resultFloat64.dtype == DType.Float64)
}
test("ImageClassifierSuite-testWithInputImage") {
@@ -106,8 +110,10 @@ class ImageClassifierSuite extends ClassifierSuite with BeforeAndAfterAll {
predictResult(i).map(_._2).toArray
}
}
+
}
+
test("ImageClassifierSuite-testWithInputBatchImage") {
val dType = DType.Float32
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512),
@@ -152,4 +158,5 @@ class ImageClassifierSuite extends ClassifierSuite with BeforeAndAfterAll {
}
}
}
+
}
diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala
index 509ffb35db8d..9afbc9b3d4a8 100644
--- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala
+++ b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala
@@ -19,7 +19,7 @@ package org.apache.mxnet.infer
import org.apache.mxnet.io.NDArrayIter
import org.apache.mxnet.module.{BaseModule, Module}
-import org.apache.mxnet.{DataDesc, Layout, NDArray, Shape}
+import org.apache.mxnet._
import org.mockito.Matchers._
import org.mockito.Mockito
import org.scalatest.{BeforeAndAfterAll, FunSuite}
@@ -91,6 +91,36 @@ class PredictorSuite extends FunSuite with BeforeAndAfterAll {
, any[Option[BaseModule]], any[String])
}
+ test("PredictorSuite-testWithFlatFloat64Arrays") {
+
+ val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2),
+ layout = Layout.NCHW, dtype = DType.Float64))
+ val inputData = Array.fill[Double](12)(1d)
+
+ // this will be disposed of at the end of the predict call on Predictor.
+ val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2), dtype = DType.Float64))
+
+ val testPredictor = new MyPredictor("xyz", inputDescriptor)
+
+ Mockito.doReturn(predictResult).when(testPredictor.mockModule)
+ .predict(any(classOf[NDArrayIter]), any[Int], any[Boolean])
+
+ val testFun = testPredictor.predict(IndexedSeq(inputData))
+
+ assert(testFun.size == 1, "output size should be 1 ")
+
+ assert(testFun(0)(0).getClass == 1d.getClass)
+
+ assert(Array.fill[Double](12)(1d).mkString == testFun(0).mkString)
+
+ // Verify that the module was bound with batch size 1 and rebound back to the original
+ // input descriptor. the number of times is twice here because loadModule overrides the
+ // initial bind.
+ Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]],
+ any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean]
+ , any[Option[BaseModule]], any[String])
+ }
+
test("PredictorSuite-testWithNDArray") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2),
layout = Layout.NCHW))
diff --git a/scala-package/init-native/linux-x86_64/pom.xml b/scala-package/init-native/linux-x86_64/pom.xml
deleted file mode 100644
index 242f2f3d5626..000000000000
--- a/scala-package/init-native/linux-x86_64/pom.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
- 4.0.0
-
- org.apache.mxnet
- mxnet-scala-init-native-parent
- 1.5.0-SNAPSHOT
- ../pom.xml
-
-
- libmxnet-init-scala-linux-x86_64
- MXNet Scala Package - Initializer Native Linux-x86_64
- http://maven.apache.org
-
- so
-
-
- ${project.parent.parent.basedir}/..
-
-
-
-
- org.apache.mxnet
- mxnet-init_${scala.binary.version}
- 1.5.0-SNAPSHOT
- jar
- compile
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
-
-
- org.codehaus.mojo
- native-maven-plugin
- true
-
-
- linux
- generic-classic
- ${cxx}
- ${cxx}
-
-
-
-
- -std=c++0x
-
-
- -I${MXNET_DIR}/include
- -I${MXNET_DIR}/3rdparty/dmlc-core/include
- -I${MXNET_DIR}/3rdparty/mshadow
- -I${MXNET_DIR}/3rdparty/dlpack/include
- -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
- -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
- -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c
- -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-
-
- -shared
-
-
- -Wl,--whole-archive
- -Wl,--no-whole-archive -pthread -lm -fopenmp -lrt
-
-
- -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib
-
-
-
-
-
- javah
- generate-sources
-
- default
- ${project.build.directory}/custom-javah
- ${basedir}
- org_apache_mxnet_init_native_c_api.h
-
- org.apache.mxnet.init.LibInfo
-
-
-
- javah
-
-
-
-
-
-
- org.codehaus.mojo
- exec-maven-plugin
- 1.6.0
-
-
- link-native-lib
- generate-resources
-
- exec
-
-
- ln
- -sf ${MXNET_DIR}/lib/libmxnet.so ${project.build.directory}/libmxnet.so
-
-
-
-
-
-
-
diff --git a/scala-package/init-native/osx-x86_64/pom.xml b/scala-package/init-native/osx-x86_64/pom.xml
deleted file mode 100644
index 12f4d800eba4..000000000000
--- a/scala-package/init-native/osx-x86_64/pom.xml
+++ /dev/null
@@ -1,142 +0,0 @@
-
-
- 4.0.0
-
- org.apache.mxnet
- mxnet-scala-init-native-parent
- 1.5.0-SNAPSHOT
- ../pom.xml
-
-
- libmxnet-init-scala-osx-x86_64
- MXNet Scala Package - Initializer Native OSX-x86_64
- http://maven.apache.org
-
- jnilib
-
-
- ${project.parent.parent.basedir}/..
-
-
-
-
- org.apache.mxnet
- mxnet-init_${scala.binary.version}
- 1.5.0-SNAPSHOT
- jar
- compile
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
-
-
- org.codehaus.mojo
- native-maven-plugin
- true
-
-
- darwin
- generic-classic
- ${cxx}
- ${cxx}
-
-
-
-
- -std=c++0x
-
-
- -I${MXNET_DIR}/include
- -I${MXNET_DIR}/3rdparty/dmlc-core/include
- -I${MXNET_DIR}/3rdparty/mshadow
- -I${MXNET_DIR}/3rdparty/dlpack/include
- -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
- -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
- -g -O0 -fPIC -msse3 -mf16c
- -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-
-
- -shared
-
-
- -framework JavaVM
- -Wl,-exported_symbol,_Java_*
- -Wl,-x
-
-
- -lmxnet -L${MXNET_DIR}/lib
-
-
-
-
-
- javah
- generate-sources
-
- default
- ${project.build.directory}/custom-javah
- ${basedir}
- org_apache_mxnet_init_native_c_api.h
-
- org.apache.mxnet.init.LibInfo
-
-
-
- javah
-
-
-
-
-
-
- org.codehaus.mojo
- exec-maven-plugin
- 1.6.0
-
-
- post-native-build
- package
-
- exec
-
-
- install_name_tool
- -change lib/libmxnet.so @loader_path/libmxnet.so ${project.build.directory}/${artifactId}.jnilib
-
-
-
- link-native-lib
- generate-resources
-
- exec
-
-
- ln
- -sf ${MXNET_DIR}/lib/libmxnet.so ${project.build.directory}/libmxnet.so
-
-
-
-
-
-
-
diff --git a/scala-package/init-native/pom.xml b/scala-package/init-native/pom.xml
index bed216e45035..1721f8cbd403 100644
--- a/scala-package/init-native/pom.xml
+++ b/scala-package/init-native/pom.xml
@@ -5,46 +5,170 @@
4.0.0org.apache.mxnet
- mxnet-parent_2.11
- 1.5.0-SNAPSHOT
+ mxnet-parent
+ INTERNAL../pom.xml
- mxnet-scala-init-native-parent
- MXNet Scala Package - Initializer Native Parent
- pom
+ libmxnet-init-scala
+ MXNet Scala Package - Initializer Native
+
+
+ ${project.parent.basedir}/..
+
+
+ ${libtype}
- osx-x86_64-cpu
-
- osx-x86_64
-
-
-
- linux-x86_64-cpu
-
- linux-x86_64
-
+ osx-x86_64
+
+ mac
+
+
+
+
+ org.codehaus.mojo
+ native-maven-plugin
+ true
+
+ darwin
+ generic-classic
+ ${cxx}
+ ${cxx}
+
+
+
+
+ -std=c++0x
+
+
+ -I${MXNET_DIR}/include
+ -I${MXNET_DIR}/3rdparty/dmlc-core/include
+ -I${MXNET_DIR}/3rdparty/mshadow
+ -I${MXNET_DIR}/3rdparty/dlpack/include
+ -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
+ -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
+ -g -O0 -fPIC -msse3 -mf16c
+ -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
+
+
+ -shared
+
+
+ -framework JavaVM
+ -Wl,-exported_symbol,_Java_*
+ -Wl,-x
+
+
+ -lmxnet -L${MXNET_DIR}/lib
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.6.0
+
+
+ post-native-build
+ package
+
+ exec
+
+
+ install_name_tool
+ -add_rpath @loader_path ${project.build.directory}/${project.artifactId}.jnilib
+
+
+
+
+
+
- linux-x86_64-gpu
-
- linux-x86_64
-
+ linux-x86_64
+
+
+ unix
+ Linux
+
+
+
+
+
+ org.codehaus.mojo
+ native-maven-plugin
+ true
+
+ linux
+ generic-classic
+ ${cxx}
+ ${cxx}
+
+
+
+
+ -std=c++0x
+
+
+ -I${MXNET_DIR}/include
+ -I${MXNET_DIR}/3rdparty/dmlc-core/include
+ -I${MXNET_DIR}/3rdparty/mshadow
+ -I${MXNET_DIR}/3rdparty/dlpack/include
+ -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
+ -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
+ -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c
+ -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
+
+
+ -shared
+
+
+ -Wl,--whole-archive
+ -Wl,--no-whole-archive -pthread -lm -fopenmp -lrt
+
+
+ -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib
+
+
+
+
+
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.6.0
+
+
+ link-native-lib
+ generate-resources
+
+ exec
+
+
+ bash
+ -c 'ln -sf ${MXNET_DIR}/lib/* ${project.build.directory}/'
+
+
+
-
diff --git a/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h b/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h
new file mode 100644
index 000000000000..6ff6ae6a107c
--- /dev/null
+++ b/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h
@@ -0,0 +1,45 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include
+/* Header for class org_apache_mxnet_init_LibInfo */
+
+#ifndef _Included_org_apache_mxnet_init_LibInfo
+#define _Included_org_apache_mxnet_init_LibInfo
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_apache_mxnet_init_LibInfo
+ * Method: mxSymbolListAtomicSymbolCreators
+ * Signature: (Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxSymbolListAtomicSymbolCreators
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_init_LibInfo
+ * Method: mxSymbolGetAtomicSymbolInfo
+ * Signature: (JLorg/apache/mxnet/init/Base/RefString;Lorg/apache/mxnet/init/Base/RefString;Lorg/apache/mxnet/init/Base/RefInt;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/init/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxSymbolGetAtomicSymbolInfo
+ (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_init_LibInfo
+ * Method: mxListAllOpNames
+ * Signature: (Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxListAllOpNames
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_init_LibInfo
+ * Method: nnGetOpHandle
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/init/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_nnGetOpHandle
+ (JNIEnv *, jobject, jstring, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/scala-package/init/pom.xml b/scala-package/init/pom.xml
index 4278df6f2e73..a0bb6be384b5 100644
--- a/scala-package/init/pom.xml
+++ b/scala-package/init/pom.xml
@@ -5,65 +5,62 @@
4.0.0org.apache.mxnet
- mxnet-parent_2.11
- 1.5.0-SNAPSHOT
-
+ mxnet-parent
+ INTERNAL
+ ../pom.xml
- mxnet-init_2.11
+ mxnet-scala-initMXNet Scala Package - Initializer
-
-
- osx-x86_64-cpu
-
- osx-x86_64-cpu
-
-
-
- linux-x86_64-cpu
-
- linux-x86_64-cpu
-
-
-
- linux-x86_64-gpu
-
- linux-x86_64-gpu
-
-
-
- apache-release
-
-
-
-
- org.codehaus.mojo
- exec-maven-plugin
- 1.6.0
-
-
- compile-mxnet-backend
- compile
-
- exec
-
-
- bash
- ${project.parent.basedir}/dev/compile-mxnet-backend.sh ${build.platform} ${project.parent.basedir}/../
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
+
+
+
+ org.codehaus.mojo
+ native-maven-plugin
+ true
+
+
+ javah
+ verify
- true
+ default
+ ${project.build.directory}/custom-javah
+ ${basedir}
+ org_apache_mxnet_init_native_c_api.h
+
+ org.apache.mxnet.init.LibInfo
+
-
-
-
-
-
+
+ javah
+
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.6.0
+
+
+ verify-javah
+ verify
+
+ exec
+
+
+ diff
+
+ ${project.build.directory}/custom-javah/org_apache_mxnet_init_native_c_api.h
+ ${project.parent.basedir}/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h
+
+
+
+
+
+
+
+
diff --git a/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala b/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala
index 7402dbd3bc1d..b5a6286af1b6 100644
--- a/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala
+++ b/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala
@@ -17,6 +17,8 @@
package org.apache.mxnet.init
+import java.io.File
+
object Base {
tryLoadInitLibrary()
val _LIB = new LibInfo
@@ -37,18 +39,22 @@ object Base {
@throws(classOf[UnsatisfiedLinkError])
private def tryLoadInitLibrary(): Unit = {
- var baseDir = System.getProperty("user.dir") + "/init-native"
- // TODO(lanKing520) Update this to use relative path to the MXNet director.
- // TODO(lanking520) baseDir = sys.env("MXNET_BASEDIR") + "/scala-package/init-native"
- if (System.getenv().containsKey("MXNET_BASEDIR")) {
- baseDir = sys.env("MXNET_BASEDIR")
+ var userDir : File = new File(System.getProperty("user.dir"))
+ var nativeDir : File = new File(userDir, "init-native")
+ if (!nativeDir.exists()) {
+ nativeDir = new File(userDir.getParent, "init-native")
+ if (!nativeDir.exists()) {
+ throw new IllegalStateException("scala-init should be executed inside scala-package folder")
+ }
}
+ val baseDir = nativeDir.getAbsolutePath
+
val os = System.getProperty("os.name")
// ref: http://lopica.sourceforge.net/os.html
if (os.startsWith("Linux")) {
- System.load(s"$baseDir/linux-x86_64/target/libmxnet-init-scala-linux-x86_64.so")
+ System.load(s"$baseDir/target/libmxnet-init-scala.so")
} else if (os.startsWith("Mac")) {
- System.load(s"$baseDir/osx-x86_64/target/libmxnet-init-scala-osx-x86_64.jnilib")
+ System.load(s"$baseDir/target/libmxnet-init-scala.jnilib")
} else {
// TODO(yizhi) support windows later
throw new UnsatisfiedLinkError()
diff --git a/scala-package/macros/pom.xml b/scala-package/macros/pom.xml
index cd56060b4b36..52dfde181d72 100644
--- a/scala-package/macros/pom.xml
+++ b/scala-package/macros/pom.xml
@@ -5,63 +5,20 @@
4.0.0org.apache.mxnet
- mxnet-parent_2.11
- 1.5.0-SNAPSHOT
+ mxnet-parent
+ INTERNAL../pom.xml
- mxnet-macros_2.11
+ mxnet-macrosMXNet Scala Package - Macros
-
-
- unittest
-
- false
-
-
-
- integrationtest
-
- true
-
-
-
- osx-x86_64-cpu
-
- osx-x86_64
- jnilib
-
-
-
- linux-x86_64-cpu
-
- linux-x86_64
- so
-
-
-
- linux-x86_64-gpu
-
- linux-x86_64
- so
-
-
-
-
org.apache.mxnet
- mxnet-init_${scala.binary.version}
- 1.5.0-SNAPSHOT
- provided
-
-
- org.apache.mxnet
- libmxnet-init-scala-${platform}
- 1.5.0-SNAPSHOT
+ mxnet-scala-init
+ INTERNALprovided
- ${libtype}commons-io
@@ -70,16 +27,8 @@
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
- org.apache.maven.pluginsmaven-jar-plugin
@@ -95,39 +44,15 @@
org.apache.maven.pluginsmaven-compiler-plugin
-
- org.codehaus.mojo
- exec-maven-plugin
- 1.6.0
-
-
- apidoc-generation
- package
-
- java
-
-
-
-
-
- ${project.parent.basedir}/init/target/classes
-
-
- ${project.parent.basedir}/core/src/main/scala/org/apache/mxnet/
-
- org.apache.mxnet.APIDocGenerator
-
- org.scalatestscalatest-maven-plugin
- ${skiptest}${project.parent.basedir}/init-native
- -Djava.library.path=${project.parent.basedir}/native/${platform}/target \
+ -Djava.library.path=${project.parent.basedir}/native/target \
-Dlog4j.configuration=file://${project.basedir}/src/test/resources/log4j.properties
@@ -138,5 +63,4 @@
-
diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala
index ce12dc7cd5a0..ede16f73d2a1 100644
--- a/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala
+++ b/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala
@@ -27,13 +27,15 @@ import scala.collection.mutable.ListBuffer
* Two file namely: SymbolAPIBase.scala and NDArrayAPIBase.scala
* The code will be executed during Macros stage and file live in Core stage
*/
-private[mxnet] object APIDocGenerator extends GeneratorBase {
+private[mxnet] object APIDocGenerator extends GeneratorBase with RandomHelpers {
def main(args: Array[String]): Unit = {
val FILE_PATH = args(0)
val hashCollector = ListBuffer[String]()
hashCollector += typeSafeClassGen(FILE_PATH, true)
hashCollector += typeSafeClassGen(FILE_PATH, false)
+ hashCollector += typeSafeRandomClassGen(FILE_PATH, true)
+ hashCollector += typeSafeRandomClassGen(FILE_PATH, false)
hashCollector += nonTypeSafeClassGen(FILE_PATH, true)
hashCollector += nonTypeSafeClassGen(FILE_PATH, false)
hashCollector += javaClassGen(FILE_PATH)
@@ -57,8 +59,27 @@ private[mxnet] object APIDocGenerator extends GeneratorBase {
writeFile(
FILE_PATH,
+ "package org.apache.mxnet",
if (isSymbol) "SymbolAPIBase" else "NDArrayAPIBase",
+ "import org.apache.mxnet.annotation.Experimental",
+ generated)
+ }
+
+ def typeSafeRandomClassGen(FILE_PATH: String, isSymbol: Boolean): String = {
+ val generated = typeSafeRandomFunctionsToGenerate(isSymbol)
+ .map { func =>
+ val scalaDoc = generateAPIDocFromBackend(func)
+ val typeParameter = randomGenericTypeSpec(isSymbol, false)
+ val decl = generateAPISignature(func, isSymbol, typeParameter)
+ s"$scalaDoc\n$decl"
+ }
+
+ writeFile(
+ FILE_PATH,
"package org.apache.mxnet",
+ if (isSymbol) "SymbolRandomAPIBase" else "NDArrayRandomAPIBase",
+ """import org.apache.mxnet.annotation.Experimental
+ |import scala.reflect.ClassTag""".stripMargin,
generated)
}
@@ -85,8 +106,9 @@ private[mxnet] object APIDocGenerator extends GeneratorBase {
writeFile(
FILE_PATH,
- if (isSymbol) "SymbolBase" else "NDArrayBase",
"package org.apache.mxnet",
+ if (isSymbol) "SymbolBase" else "NDArrayBase",
+ "import org.apache.mxnet.annotation.Experimental",
absFuncs)
}
@@ -110,7 +132,12 @@ private[mxnet] object APIDocGenerator extends GeneratorBase {
}).toSeq
val packageName = "NDArrayBase"
val packageDef = "package org.apache.mxnet.javaapi"
- writeFile(filePath + "javaapi/", packageName, packageDef, absFuncs)
+ writeFile(
+ filePath + "javaapi/",
+ packageDef,
+ packageName,
+ "import org.apache.mxnet.annotation.Experimental",
+ absFuncs)
}
def generateAPIDocFromBackend(func: Func, withParam: Boolean = true): String = {
@@ -146,7 +173,7 @@ private[mxnet] object APIDocGenerator extends GeneratorBase {
}
}
- def generateAPISignature(func: Func, isSymbol: Boolean): String = {
+ def generateAPISignature(func: Func, isSymbol: Boolean, typeParameter: String = ""): String = {
val argDef = ListBuffer[String]()
argDef ++= typedFunctionCommonArgDef(func)
@@ -162,7 +189,7 @@ private[mxnet] object APIDocGenerator extends GeneratorBase {
val returnType = func.returnType
s"""@Experimental
- |def ${func.name} (${argDef.mkString(", ")}): $returnType""".stripMargin
+ |def ${func.name}$typeParameter (${argDef.mkString(", ")}): $returnType""".stripMargin
}
def generateJavaAPISignature(func : Func) : String = {
@@ -223,30 +250,30 @@ private[mxnet] object APIDocGenerator extends GeneratorBase {
}
}
- def writeFile(FILE_PATH: String, className: String, packageDef: String,
- absFuncs: Seq[String]): String = {
+ def writeFile(FILE_PATH: String, packageDef: String, className: String,
+ imports: String, absFuncs: Seq[String]): String = {
val finalStr =
s"""/*
- |* Licensed to the Apache Software Foundation (ASF) under one or more
- |* contributor license agreements. See the NOTICE file distributed with
- |* this work for additional information regarding copyright ownership.
- |* The ASF licenses this file to You under the Apache License, Version 2.0
- |* (the "License"); you may not use this file except in compliance with
- |* the License. You may obtain a copy of the License at
- |*
- |* http://www.apache.org/licenses/LICENSE-2.0
- |*
- |* Unless required by applicable law or agreed to in writing, software
- |* distributed under the License is distributed on an "AS IS" BASIS,
- |* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- |* See the License for the specific language governing permissions and
- |* limitations under the License.
- |*/
+ | * Licensed to the Apache Software Foundation (ASF) under one or more
+ | * contributor license agreements. See the NOTICE file distributed with
+ | * this work for additional information regarding copyright ownership.
+ | * The ASF licenses this file to You under the Apache License, Version 2.0
+ | * (the "License"); you may not use this file except in compliance with
+ | * the License. You may obtain a copy of the License at
+ | *
+ | * http://www.apache.org/licenses/LICENSE-2.0
+ | *
+ | * Unless required by applicable law or agreed to in writing, software
+ | * distributed under the License is distributed on an "AS IS" BASIS,
+ | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ | * See the License for the specific language governing permissions and
+ | * limitations under the License.
+ | */
|
|$packageDef
|
- |import org.apache.mxnet.annotation.Experimental
+ |$imports
|
|// scalastyle:off
|abstract class $className {
diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala
index 9245ef1b437f..498c4e943669 100644
--- a/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala
+++ b/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala
@@ -23,7 +23,7 @@ import org.apache.mxnet.utils.{CToScalaUtils, OperatorBuildUtils}
import scala.collection.mutable.ListBuffer
import scala.reflect.macros.blackbox
-abstract class GeneratorBase {
+private[mxnet] abstract class GeneratorBase {
type Handle = Long
case class Arg(argName: String, argType: String, argDesc: String, isOptional: Boolean) {
@@ -46,7 +46,8 @@ abstract class GeneratorBase {
}
}
- def typeSafeFunctionsToGenerate(isSymbol: Boolean, isContrib: Boolean): List[Func] = {
+ // filter the operators to generate in the type-safe Symbol.api and NDArray.api
+ protected def typeSafeFunctionsToGenerate(isSymbol: Boolean, isContrib: Boolean): List[Func] = {
// Operators that should not be generated
val notGenerated = Set("Custom")
@@ -95,7 +96,7 @@ abstract class GeneratorBase {
else if (isSymbol) "org.apache.mxnet.Symbol"
else "org.apache.mxnet.NDArray"
val typeAndOption =
- CToScalaUtils.argumentCleaner(argName, argType, family)
+ CToScalaUtils.argumentCleaner(argName, argType, family, isJava)
Arg(argName, typeAndOption._1, argDesc, typeAndOption._2)
}
val returnType =
@@ -144,8 +145,8 @@ abstract class GeneratorBase {
result
}
+ // build function argument definition, with optionality, and safe names
protected def typedFunctionCommonArgDef(func: Func): List[String] = {
- // build function argument definition, with optionality, and safe names
func.listOfArgs.map(arg =>
if (arg.isOptional) {
// let's avoid a stupid Option[Array[...]]
@@ -161,3 +162,71 @@ abstract class GeneratorBase {
)
}
}
+
+// a mixin to ease generating the Random module
+private[mxnet] trait RandomHelpers {
+ self: GeneratorBase =>
+
+ // a generic type spec used in Symbol.random and NDArray.random modules
+ protected def randomGenericTypeSpec(isSymbol: Boolean, fullPackageSpec: Boolean): String = {
+ val classTag = if (fullPackageSpec) "scala.reflect.ClassTag" else "ClassTag"
+ if (isSymbol) s"[T: SymbolOrScalar : $classTag]"
+ else s"[T: NDArrayOrScalar : $classTag]"
+ }
+
+ // filter the operators to generate in the type-safe Symbol.random and NDArray.random
+ protected def typeSafeRandomFunctionsToGenerate(isSymbol: Boolean): List[Func] = {
+ getBackEndFunctions(isSymbol)
+ .filter(f => f.name.startsWith("_sample_") || f.name.startsWith("_random_"))
+ .map(f => f.copy(name = f.name.stripPrefix("_")))
+ // unify _random and _sample
+ .map(f => unifyRandom(f, isSymbol))
+ // deduplicate
+ .groupBy(_.name)
+ .mapValues(_.head)
+ .values
+ .toList
+ }
+
+ // unify call targets (random_xyz and sample_xyz) and unify their argument types
+ private def unifyRandom(func: Func, isSymbol: Boolean): Func = {
+ var typeConv = Set("org.apache.mxnet.NDArray", "org.apache.mxnet.Symbol",
+ "Float", "Int")
+
+ func.copy(
+ name = func.name.replaceAll("(random|sample)_", ""),
+ listOfArgs = func.listOfArgs
+ .map(hackNormalFunc)
+ .map(arg =>
+ if (typeConv(arg.argType)) arg.copy(argType = "T")
+ else arg
+ )
+ // TODO: some functions are non consistent in random_ vs sample_ regarding optionality
+ // we may try to unify that as well here.
+ )
+ }
+
+ // hacks to manage the fact that random_normal and sample_normal have
+ // non-consistent parameter naming in the back-end
+ // this first one, merge loc/scale and mu/sigma
+ protected def hackNormalFunc(arg: Arg): Arg = {
+ if (arg.argName == "loc") arg.copy(argName = "mu")
+ else if (arg.argName == "scale") arg.copy(argName = "sigma")
+ else arg
+ }
+
+ // this second one reverts this merge prior to back-end call
+ protected def unhackNormalFunc(func: Func): String = {
+ if (func.name.equals("normal")) {
+ s"""if(target.equals("random_normal")) {
+ | if(map.contains("mu")) { map("loc") = map("mu"); map.remove("mu") }
+ | if(map.contains("sigma")) { map("scale") = map("sigma"); map.remove("sigma") }
+ |}
+ """.stripMargin
+ } else {
+ ""
+ }
+
+ }
+
+}
diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala
index d85abe1ecc4f..c18694b59bf6 100644
--- a/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala
+++ b/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala
@@ -18,7 +18,6 @@
package org.apache.mxnet
import scala.annotation.StaticAnnotation
-import scala.collection.mutable.ListBuffer
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
@@ -30,6 +29,14 @@ private[mxnet] class AddNDArrayAPIs(isContrib: Boolean) extends StaticAnnotation
private[mxnet] def macroTransform(annottees: Any*) = macro TypedNDArrayAPIMacro.typeSafeAPIDefs
}
+private[mxnet] class AddNDArrayRandomAPIs(isContrib: Boolean) extends StaticAnnotation {
+ private[mxnet] def macroTransform(annottees: Any*) =
+ macro TypedNDArrayRandomAPIMacro.typeSafeAPIDefs
+}
+
+/**
+ * For non-typed NDArray API
+ */
private[mxnet] object NDArrayMacro extends GeneratorBase {
def addDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
@@ -70,6 +77,9 @@ private[mxnet] object NDArrayMacro extends GeneratorBase {
}
}
+/**
+ * NDArray.api code generation
+ */
private[mxnet] object TypedNDArrayAPIMacro extends GeneratorBase {
def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
@@ -78,9 +88,9 @@ private[mxnet] object TypedNDArrayAPIMacro extends GeneratorBase {
case q"new AddNDArrayAPIs($b)" => c.eval[Boolean](c.Expr(b))
}
- val functions = typeSafeFunctionsToGenerate(isSymbol = false, isContrib)
+ val functionDefs = typeSafeFunctionsToGenerate(isSymbol = false, isContrib)
+ .map(f => buildTypedFunction(c)(f))
- val functionDefs = functions.map(f => buildTypedFunction(c)(f))
structGeneration(c)(functionDefs, annottees: _*)
}
@@ -89,49 +99,136 @@ private[mxnet] object TypedNDArrayAPIMacro extends GeneratorBase {
import c.universe._
val returnType = "org.apache.mxnet.NDArrayFuncReturn"
- val ndarrayType = "org.apache.mxnet.NDArray"
-
- // Construct argument field
- val argDef = ListBuffer[String]()
- argDef ++= typedFunctionCommonArgDef(function)
- argDef += "out : Option[NDArray] = None"
-
- // Construct Implementation field
- var impl = ListBuffer[String]()
- impl += "val map = scala.collection.mutable.Map[String, Any]()"
- impl += s"val args = scala.collection.mutable.ArrayBuffer.empty[$ndarrayType]"
-
- // NDArray arg implementation
- impl ++= function.listOfArgs.map { arg =>
- if (arg.argType.equals(s"Array[$ndarrayType]")) {
- s"args ++= ${arg.safeArgName}"
- } else {
- val base =
- if (arg.argType.equals(ndarrayType)) {
- // ndarrays go to args
+
+ // Construct API arguments declaration
+ val argDecl = super.typedFunctionCommonArgDef(function) :+ "out : Option[NDArray] = None"
+
+ // Map API input args to backend args
+ val backendArgsMapping =
+ function.listOfArgs.map { arg =>
+ // ndarrays go to args, other types go to kwargs
+ if (arg.argType.equals(s"Array[org.apache.mxnet.NDArray]")) {
+ s"args ++= ${arg.safeArgName}.toSeq"
+ } else {
+ val base = if (arg.argType.equals("org.apache.mxnet.NDArray")) {
s"args += ${arg.safeArgName}"
} else {
- // other types go to kwargs
s"""map("${arg.argName}") = ${arg.safeArgName}"""
}
- if (arg.isOptional) s"if (!${arg.safeArgName}.isEmpty) $base.get"
- else base
+ if (arg.isOptional) s"if (!${arg.safeArgName}.isEmpty) $base.get"
+ else base
+ }
}
- }
- impl +=
- s"""if (!out.isEmpty) map("out") = out.get
- |org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(
- | "${function.name}", args.toSeq, map.toMap)
+ val impl =
+ s"""
+ |def ${function.name}
+ | (${argDecl.mkString(",")}): $returnType = {
+ |
+ | val map = scala.collection.mutable.Map[String, Any]()
+ | val args = scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray]
+ |
+ | if (!out.isEmpty) map("out") = out.get
+ |
+ | ${backendArgsMapping.mkString("\n")}
+ |
+ | org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(
+ | "${function.name}", args.toSeq, map.toMap)
+ |}
""".stripMargin
- // Combine and build the function string
- val finalStr =
- s"""def ${function.name}
- | (${argDef.mkString(",")}) : $returnType
- | = {${impl.mkString("\n")}}
+ c.parse(impl).asInstanceOf[DefDef]
+ }
+}
+
+
+/**
+ * NDArray.random code generation
+ */
+private[mxnet] object TypedNDArrayRandomAPIMacro extends GeneratorBase
+ with RandomHelpers {
+
+ def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
+ // Note: no contrib managed in this module
+
+ val functionDefs = typeSafeRandomFunctionsToGenerate(isSymbol = false)
+ .map(f => buildTypedFunction(c)(f))
+
+ structGeneration(c)(functionDefs, annottees: _*)
+ }
+
+ protected def buildTypedFunction(c: blackbox.Context)
+ (function: Func): c.universe.DefDef = {
+ import c.universe._
+
+ val returnType = "org.apache.mxnet.NDArrayFuncReturn"
+
+ // Construct API arguments declaration
+ val argDecl = super.typedFunctionCommonArgDef(function) :+ "out : Option[NDArray] = None"
+
+ // Map API input args to backend args
+ val backendArgsMapping =
+ function.listOfArgs.map { arg =>
+ // ndarrays go to args, other types go to kwargs
+ if (arg.argType.equals("Array[org.apache.mxnet.NDArray]")) {
+ s"args ++= ${arg.safeArgName}.toSeq"
+ } else {
+ if (arg.argType.equals("T")) {
+ if (arg.isOptional) {
+ s"""if(${arg.safeArgName}.isDefined) {
+ | if(isScalar) {
+ | map("${arg.argName}") = ${arg.safeArgName}.get
+ | } else {
+ | args += ${arg.safeArgName}.get.asInstanceOf[org.apache.mxnet.NDArray]
+ | }
+ |}
+ """.stripMargin
+ } else {
+ s"""if(isScalar) {
+ | map("${arg.argName}") = ${arg.safeArgName}
+ |} else {
+ | args += ${arg.safeArgName}.asInstanceOf[org.apache.mxnet.NDArray]
+ |}
+ """.stripMargin
+ }
+ } else {
+ if (arg.isOptional) {
+ s"""if (${arg.safeArgName}.isDefined) map("${arg.argName}")=${arg.safeArgName}.get"""
+ } else {
+ s"""map("${arg.argName}") = ${arg.safeArgName}"""
+ }
+ }
+ }
+ }
+
+ val impl =
+ s"""
+ |def ${function.name}${randomGenericTypeSpec(false, true)}
+ | (${argDecl.mkString(",")}): $returnType = {
+ |
+ | val map = scala.collection.mutable.Map[String, Any]()
+ | val args = scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray]
+ | val isScalar = NDArrayOrScalar[T].isScalar
+ |
+ | if(out.isDefined) map("out") = out.get
+ |
+ | ${backendArgsMapping.mkString("\n")}
+ |
+ | val target = if(isScalar) {
+ | "random_${function.name}"
+ | } else {
+ | "sample_${function.name}"
+ | }
+ |
+ | ${unhackNormalFunc(function)}
+ |
+ | org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(
+ | target, args.toSeq, map.toMap)
+ |}
""".stripMargin
- c.parse(finalStr).asInstanceOf[DefDef]
+ c.parse(impl).asInstanceOf[DefDef]
}
+
+
}
diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala
index ab864e1ef195..7ec80b9c066c 100644
--- a/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala
+++ b/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala
@@ -17,8 +17,8 @@
package org.apache.mxnet
+
import scala.annotation.StaticAnnotation
-import scala.collection.mutable.ListBuffer
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
@@ -30,6 +30,14 @@ private[mxnet] class AddSymbolAPIs(isContrib: Boolean) extends StaticAnnotation
private[mxnet] def macroTransform(annottees: Any*) = macro TypedSymbolAPIMacro.typeSafeAPIDefs
}
+private[mxnet] class AddSymbolRandomAPIs(isContrib: Boolean) extends StaticAnnotation {
+ private[mxnet] def macroTransform(annottees: Any*) =
+ macro TypedSymbolRandomAPIMacro.typeSafeAPIDefs
+}
+
+/**
+ * For non-typed Symbol API
+ */
private[mxnet] object SymbolMacro extends GeneratorBase {
def addDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
@@ -63,6 +71,9 @@ private[mxnet] object SymbolMacro extends GeneratorBase {
}
}
+/**
+ * Symbol.api code generation
+ */
private[mxnet] object TypedSymbolAPIMacro extends GeneratorBase {
def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
@@ -71,9 +82,9 @@ private[mxnet] object TypedSymbolAPIMacro extends GeneratorBase {
case q"new AddSymbolAPIs($b)" => c.eval[Boolean](c.Expr(b))
}
- val functions = typeSafeFunctionsToGenerate(isSymbol = true, isContrib)
+ val functionDefs = typeSafeFunctionsToGenerate(isSymbol = true, isContrib)
+ .map(f => buildTypedFunction(c)(f))
- val functionDefs = functions.map(f => buildTypedFunction(c)(f))
structGeneration(c)(functionDefs, annottees: _*)
}
@@ -82,45 +93,111 @@ private[mxnet] object TypedSymbolAPIMacro extends GeneratorBase {
import c.universe._
val returnType = "org.apache.mxnet.Symbol"
- val symbolType = "org.apache.mxnet.Symbol"
-
- // Construct argument field
- val argDef = ListBuffer[String]()
- argDef ++= typedFunctionCommonArgDef(function)
- argDef += "name : String = null"
- argDef += "attr : Map[String, String] = null"
-
- // Construct Implementation field
- val impl = ListBuffer[String]()
- impl += "val map = scala.collection.mutable.Map[String, Any]()"
- impl += s"var args = scala.collection.Seq[$symbolType]()"
-
- // Symbol arg implementation
- impl ++= function.listOfArgs.map { arg =>
- if (arg.argType.equals(s"Array[$symbolType]")) {
- s"if (!${arg.safeArgName}.isEmpty) args = ${arg.safeArgName}.toSeq"
- } else {
- // all go in kwargs
- if (arg.isOptional) {
- s"""if (!${arg.safeArgName}.isEmpty) map("${arg.argName}") = ${arg.safeArgName}.get"""
+
+ // Construct API arguments declaration
+ val argDecl = super.typedFunctionCommonArgDef(function) :+
+ "name : String = null" :+
+ "attr : Map[String, String] = null"
+
+ // Map API input args to backend args
+ val backendArgsMapping =
+ function.listOfArgs.map { arg =>
+ if (arg.argType.equals(s"Array[org.apache.mxnet.Symbol]")) {
+ s"args = ${arg.safeArgName}.toSeq"
} else {
- s"""map("${arg.argName}") = ${arg.safeArgName}"""
+ // all go in kwargs
+ if (arg.isOptional) {
+ s"""if (!${arg.safeArgName}.isEmpty) map("${arg.argName}") = ${arg.safeArgName}.get"""
+ } else {
+ s"""map("${arg.argName}") = ${arg.safeArgName}"""
+ }
}
}
- }
- impl +=
- s"""org.apache.mxnet.Symbol.createSymbolGeneral(
- | "${function.name}", name, attr, args, map.toMap)
+ val impl =
+ s"""
+ |def ${function.name}
+ | (${argDecl.mkString(",")}): $returnType = {
+ |
+ | val map = scala.collection.mutable.Map[String, Any]()
+ | var args = scala.collection.Seq[org.apache.mxnet.Symbol]()
+ |
+ | ${backendArgsMapping.mkString("\n")}
+ |
+ | org.apache.mxnet.Symbol.createSymbolGeneral(
+ | "${function.name}", name, attr, args, map.toMap)
+ |}
""".stripMargin
- // Combine and build the function string
- val finalStr =
- s"""def ${function.name}
- | (${argDef.mkString(",")}) : $returnType
- | = {${impl.mkString("\n")}}
+ c.parse(impl).asInstanceOf[DefDef]
+ }
+}
+
+
+/**
+ * Symbol.random code generation
+ */
+private[mxnet] object TypedSymbolRandomAPIMacro extends GeneratorBase
+ with RandomHelpers {
+
+ def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
+ val functionDefs = typeSafeRandomFunctionsToGenerate(isSymbol = true)
+ .map(f => buildTypedFunction(c)(f))
+
+ structGeneration(c)(functionDefs, annottees: _*)
+ }
+
+ protected def buildTypedFunction(c: blackbox.Context)
+ (function: Func): c.universe.DefDef = {
+ import c.universe._
+
+ val returnType = "org.apache.mxnet.Symbol"
+
+ // Construct API arguments declaration
+ val argDecl = super.typedFunctionCommonArgDef(function) :+
+ "name : String = null" :+
+ "attr : Map[String, String] = null"
+
+ // Map API input args to backend args
+ val backendArgsMapping =
+ function.listOfArgs.map { arg =>
+ if (arg.argType.equals(s"Array[org.apache.mxnet.Symbol]")) {
+ s"args = ${arg.safeArgName}.toSeq"
+ } else {
+ // all go in kwargs
+ if (arg.isOptional) {
+ s"""if (${arg.safeArgName}.isDefined) map("${arg.argName}") = ${arg.safeArgName}.get"""
+ } else {
+ s"""map("${arg.argName}") = ${arg.safeArgName}"""
+ }
+ }
+ }
+
+ val impl =
+ s"""
+ |def ${function.name}${randomGenericTypeSpec(true, true)}
+ | (${argDecl.mkString(",")}): $returnType = {
+ |
+ | val map = scala.collection.mutable.Map[String, Any]()
+ | var args = scala.collection.Seq[org.apache.mxnet.Symbol]()
+ | val isScalar = SymbolOrScalar[T].isScalar
+ |
+ | ${backendArgsMapping.mkString("\n")}
+ |
+ | val target = if(isScalar) {
+ | "random_${function.name}"
+ | } else {
+ | "sample_${function.name}"
+ | }
+ |
+ | ${unhackNormalFunc(function)}
+ |
+ | org.apache.mxnet.Symbol.createSymbolGeneral(
+ | target, name, attr, args, map.toMap)
+ |}
""".stripMargin
- c.parse(finalStr).asInstanceOf[DefDef]
+ c.parse(impl).asInstanceOf[DefDef]
}
}
+
diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala
index 2fd8b2e73c7a..57c4cfba10b7 100644
--- a/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala
+++ b/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala
@@ -18,23 +18,35 @@ package org.apache.mxnet.utils
private[mxnet] object CToScalaUtils {
-
+ private val javaType = Map(
+ "float" -> "java.lang.Float",
+ "int" -> "java.lang.Integer",
+ "long" -> "java.lang.Long",
+ "double" -> "java.lang.Double",
+ "bool" -> "java.lang.Boolean")
+ private val scalaType = Map(
+ "float" -> "Float",
+ "int" -> "Int",
+ "long" -> "Long",
+ "double" -> "Double",
+ "bool" -> "Boolean")
// Convert C++ Types to Scala Types
def typeConversion(in : String, argType : String = "", argName : String,
- returnType : String) : String = {
+ returnType : String, isJava : Boolean) : String = {
val header = returnType.split("\\.").dropRight(1)
+ val types = if (isJava) javaType else scalaType
in match {
case "Shape(tuple)" | "ShapeorNone" => s"${header.mkString(".")}.Shape"
case "Symbol" | "NDArray" | "NDArray-or-Symbol" => returnType
case "Symbol[]" | "NDArray[]" | "NDArray-or-Symbol[]" | "SymbolorSymbol[]"
=> s"Array[$returnType]"
- case "float" | "real_t" | "floatorNone" => "java.lang.Float"
- case "int" | "intorNone" | "int(non-negative)" => "java.lang.Integer"
- case "long" | "long(non-negative)" => "java.lang.Long"
- case "double" | "doubleorNone" => "java.lang.Double"
+ case "float" | "real_t" | "floatorNone" => types("float")
+ case "int" | "intorNone" | "int(non-negative)" => types("int")
+ case "long" | "long(non-negative)" => types("long")
+ case "double" | "doubleorNone" => types("double")
case "string" => "String"
- case "boolean" | "booleanorNone" => "java.lang.Boolean"
+ case "boolean" | "booleanorNone" => types("bool")
case "tupleof" | "tupleof" | "tupleof<>" | "ptr" | "" => "Any"
case default => throw new IllegalArgumentException(
s"Invalid type for args: $default\nString argType: $argType\nargName: $argName")
@@ -54,7 +66,7 @@ private[mxnet] object CToScalaUtils {
* @return (Scala_Type, isOptional)
*/
def argumentCleaner(argName: String, argType : String,
- returnType : String) : (String, Boolean) = {
+ returnType : String, isJava : Boolean) : (String, Boolean) = {
val spaceRemoved = argType.replaceAll("\\s+", "")
var commaRemoved : Array[String] = new Array[String](0)
// Deal with the case e.g: stype : {'csr', 'default', 'row_sparse'}
@@ -72,9 +84,9 @@ private[mxnet] object CToScalaUtils {
s"""expected "optional" got ${commaRemoved(1)}""")
require(commaRemoved(2).startsWith("default="),
s"""expected "default=..." got ${commaRemoved(2)}""")
- (typeConversion(commaRemoved(0), argType, argName, returnType), true)
+ (typeConversion(commaRemoved(0), argType, argName, returnType, isJava), true)
} else if (commaRemoved.length == 2 || commaRemoved.length == 1) {
- val tempType = typeConversion(commaRemoved(0), argType, argName, returnType)
+ val tempType = typeConversion(commaRemoved(0), argType, argName, returnType, isJava)
val tempOptional = tempType.equals("org.apache.mxnet.Symbol")
(tempType, tempOptional)
} else {
diff --git a/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala b/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala
index 4404b0885d57..4069bba25220 100644
--- a/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala
+++ b/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala
@@ -36,14 +36,15 @@ class MacrosSuite extends FunSuite with BeforeAndAfterAll {
)
val output = List(
("org.apache.mxnet.Symbol", true),
- ("java.lang.Integer", false),
+ ("Int", false),
("org.apache.mxnet.Shape", true),
("String", true),
("Any", false)
)
for (idx <- input.indices) {
- val result = CToScalaUtils.argumentCleaner("Sample", input(idx), "org.apache.mxnet.Symbol")
+ val result = CToScalaUtils.argumentCleaner("Sample", input(idx),
+ "org.apache.mxnet.Symbol", false)
assert(result._1 === output(idx)._1 && result._2 === output(idx)._2)
}
}
diff --git a/scala-package/mxnet-demo/java-demo/Makefile b/scala-package/mxnet-demo/java-demo/Makefile
index bb47db1c6d27..4f2b5e938970 100644
--- a/scala-package/mxnet-demo/java-demo/Makefile
+++ b/scala-package/mxnet-demo/java-demo/Makefile
@@ -16,7 +16,7 @@
# under the License.
SCALA_VERSION_PROFILE := 2.11
-MXNET_VERSION := 1.4.0-SNAPSHOT
+MXNET_VERSION := [1.5.0-SNAPSHOT,\)
ifeq ($(OS),Windows_NT)
UNAME_S := Windows
diff --git a/scala-package/mxnet-demo/java-demo/README.md b/scala-package/mxnet-demo/java-demo/README.md
index dbe18052a899..ca2828ae405d 100644
--- a/scala-package/mxnet-demo/java-demo/README.md
+++ b/scala-package/mxnet-demo/java-demo/README.md
@@ -12,7 +12,7 @@ You can use the following instruction as an alternative to achieve the same resu
User are required to use `mvn package` to build the package,
which are shown below:
```Bash
-export SCALA_VERSION_PROFILE=2.11 MXNET_VERSION=1.4.0-SNAPSHOT
+export SCALA_VERSION_PROFILE=2.11 MXNET_VERSION=1.5.0-SNAPSHOT
export SCALA_PKG_PROFILE=
mvn package -Dmxnet.profile=$SCALA_PKG_PROFILE \
-Dmxnet.scalaprofile=$SCALA_VERSION_PROFILE \
@@ -37,6 +37,13 @@ However, you have to define the Classpath before you run the demo code. More inf
The `CLASSPATH` should point to the jar file you have downloaded.
It will load the library automatically and run the example
+
+In order to use the `Param Object`, you are required to place this line at the front:
+```
+static NDArray$ NDArray = NDArray$.MODULE$;
+```
+This makes the NDArray companion object static and accessible from the outside.
+
### Object Detection using Inference API
We also provide an example to do object detection, which downloads a ImageNet trained resnet50 model and runs inference on an image to return the classification result as
```Bash
@@ -80,5 +87,5 @@ sudo apt install libopencv-imgcodecs3.4
Is there any other version available?
-You can find nightly release version from [here](https://repository.apache.org/#nexus-search;gav~org.apache.mxnet~~1.4.0-SNAPSHOT~~).
+You can find nightly release version from [here](https://repository.apache.org/#nexus-search;gav~org.apache.mxnet~~1.5.0-SNAPSHOT~~).
Please keep the same version in the Makefile or [above version](https://repository.apache.org/#nexus-search;gav~org.apache.mxnet~~~~) to run this demo.
diff --git a/scala-package/mxnet-demo/java-demo/bin/java_sample.sh b/scala-package/mxnet-demo/java-demo/bin/java_sample.sh
old mode 100644
new mode 100755
index 2ec9a78c3233..4fb724aca8db
--- a/scala-package/mxnet-demo/java-demo/bin/java_sample.sh
+++ b/scala-package/mxnet-demo/java-demo/bin/java_sample.sh
@@ -16,5 +16,5 @@
# under the License.
#!/bin/bash
CURR_DIR=$(cd $(dirname $0)/../; pwd)
-CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/classes/lib/*
+CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/dependency/*
java -Xmx8G -cp $CLASSPATH mxnet.HelloWorld
\ No newline at end of file
diff --git a/scala-package/mxnet-demo/java-demo/bin/run_od.sh b/scala-package/mxnet-demo/java-demo/bin/run_od.sh
old mode 100644
new mode 100755
index e3c8fd545048..abd0bf5b1b93
--- a/scala-package/mxnet-demo/java-demo/bin/run_od.sh
+++ b/scala-package/mxnet-demo/java-demo/bin/run_od.sh
@@ -16,5 +16,5 @@
# under the License.
#!/bin/bash
CURR_DIR=$(cd $(dirname $0)/../; pwd)
-CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/classes/lib/*
+CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/dependency/*
java -Xmx8G -cp $CLASSPATH mxnet.ObjectDetection
\ No newline at end of file
diff --git a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java
index 3f209a6c6c84..71981e2691c5 100644
--- a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java
+++ b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java
@@ -20,9 +20,13 @@
import java.util.Arrays;
public class HelloWorld {
+ static NDArray$ NDArray = NDArray$.MODULE$;
+
public static void main(String[] args) {
System.out.println("Hello World!");
NDArray nd = new NDArray(new float[]{2.0f, 3.0f}, new Shape(new int[]{1, 2}), Context.cpu());
System.out.println(nd.shape());
+ NDArray nd2 = NDArray.dot(NDArray.new dotParam(nd, nd.T()))[0];
+ System.out.println(Arrays.toString(nd2.toArray()));
}
}
diff --git a/scala-package/mxnet-demo/scala-demo/pom.xml b/scala-package/mxnet-demo/scala-demo/pom.xml
index 8fc30e78cac8..a908487cd21d 100644
--- a/scala-package/mxnet-demo/scala-demo/pom.xml
+++ b/scala-package/mxnet-demo/scala-demo/pom.xml
@@ -4,7 +4,7 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4.0.0Demo
- mxnet-scala-demo_2.11
+ mxnet-scala-demo1.0-SNAPSHOTMXNet Scala Demopom
diff --git a/scala-package/native/README.md b/scala-package/native/README.md
index cb6dd3890dd2..c87b064fff02 100644
--- a/scala-package/native/README.md
+++ b/scala-package/native/README.md
@@ -6,7 +6,11 @@ MXNet Scala JNI is a thin wrapper layer of underlying libmxnet.so.
JNI native code requires a header file that matches the java/scala interface,
this file is usually generated with javah.
-In our case, jni_helper_func.h is generated and will be used to compile native code.
+In our case, org_apache_mxnet_native_c.h is generated and will be used to compile native code.
+
+To improve build performance, we check in the generated org_apache_mxnet_native_c.h file.
+We also added a check that detects mismatches between the Scala code and the generated header,
+ensuring we do not forget to update the org_apache_mxnet_native_c.h file.
## Linker options
diff --git a/scala-package/native/linux-x86_64-cpu/pom.xml b/scala-package/native/linux-x86_64-cpu/pom.xml
deleted file mode 100644
index 7cfd01a4ef79..000000000000
--- a/scala-package/native/linux-x86_64-cpu/pom.xml
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
- 4.0.0
-
- org.apache.mxnet
- mxnet-scala-native-parent
- 1.5.0-SNAPSHOT
- ../pom.xml
-
-
- libmxnet-scala-linux-x86_64-cpu
- MXNet Scala Package - Native Linux-x86_64 CPU-only
- http://maven.apache.org
-
- so
-
-
- ${project.parent.parent.basedir}/..
-
-
-
-
- org.apache.mxnet
- mxnet-core_${scala.binary.version}
- 1.5.0-SNAPSHOT
- jar
- compile
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
-
-
- org.codehaus.mojo
- native-maven-plugin
- true
-
-
- linux
- generic-classic
- ${cxx}
- ${cxx}
-
-
-
-
- -std=c++0x
-
-
- -I${MXNET_DIR}/include
- -I${MXNET_DIR}/3rdparty/dmlc-core/include
- -I${MXNET_DIR}/3rdparty/mshadow
- -I${MXNET_DIR}/3rdparty/dlpack/include
- -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
- -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
- -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c
- -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-
-
- -shared
-
-
- -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib
-
-
-
-
-
- javah
- generate-sources
-
- default
- ${project.build.directory}/custom-javah
- ${basedir}
- org_apache_mxnet_native_c_api.h
-
- org.apache.mxnet.LibInfo
-
-
-
- javah
-
-
-
-
-
-
-
diff --git a/scala-package/native/linux-x86_64-gpu/pom.xml b/scala-package/native/linux-x86_64-gpu/pom.xml
deleted file mode 100644
index 668f330b5ff9..000000000000
--- a/scala-package/native/linux-x86_64-gpu/pom.xml
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
- 4.0.0
-
- org.apache.mxnet
- mxnet-scala-native-parent
- 1.5.0-SNAPSHOT
- ../pom.xml
-
-
- libmxnet-scala-linux-x86_64-gpu
- MXNet Scala Package - Native Linux-x86_64 GPU
- http://maven.apache.org
-
- so
-
-
- ${project.parent.parent.basedir}/..
-
-
-
-
- org.apache.mxnet
- mxnet-core_${scala.binary.version}
- 1.5.0-SNAPSHOT
- jar
- compile
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
-
-
- org.codehaus.mojo
- native-maven-plugin
- true
-
-
- linux
- generic-classic
- ${cxx}
- ${cxx}
-
-
-
-
- -std=c++0x
-
-
- -I${MXNET_DIR}/include
- -I${MXNET_DIR}/3rdparty/dmlc-core/include
- -I${MXNET_DIR}/3rdparty/mshadow
- -I${MXNET_DIR}/3rdparty/dlpack/include
- -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
- -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
- -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c
- -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-
-
- -shared
-
-
- -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib
-
-
-
-
-
- javah
- generate-sources
-
- default
- ${project.build.directory}/custom-javah
- ${basedir}
- org_apache_mxnet_native_c_api.h
-
- org.apache.mxnet.LibInfo
-
-
-
- javah
-
-
-
-
-
-
-
diff --git a/scala-package/native/osx-x86_64-cpu/pom.xml b/scala-package/native/osx-x86_64-cpu/pom.xml
deleted file mode 100644
index 425ca96815de..000000000000
--- a/scala-package/native/osx-x86_64-cpu/pom.xml
+++ /dev/null
@@ -1,142 +0,0 @@
-
-
- 4.0.0
-
- org.apache.mxnet
- mxnet-scala-native-parent
- 1.5.0-SNAPSHOT
- ../pom.xml
-
-
- libmxnet-scala-osx-x86_64-cpu
- MXNet Scala Package - Native OSX-x86_64 CPU-only
- http://maven.apache.org
-
- jnilib
-
-
- ${project.parent.parent.basedir}/..
-
-
-
-
- org.apache.mxnet
- mxnet-core_${scala.binary.version}
- 1.5.0-SNAPSHOT
- jar
- compile
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
-
-
-
- org.codehaus.mojo
- native-maven-plugin
- true
-
-
- darwin
- generic-classic
- ${cxx}
- ${cxx}
-
-
-
-
- -std=c++0x
-
-
- -I${MXNET_DIR}/include
- -I${MXNET_DIR}/3rdparty/dmlc-core/include
- -I${MXNET_DIR}/3rdparty/mshadow
- -I${MXNET_DIR}/3rdparty/dlpack/include
- -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
- -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
- -g -O0 -fPIC -msse3 -mf16c
- -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-
-
- -shared
-
-
- -framework JavaVM
- -Wl,-exported_symbol,_Java_*
- -Wl,-x
-
-
- -Wl,-install_name,libmxnet-scala.jnilib -lmxnet -L${MXNET_DIR}/lib
-
-
-
-
-
- javah
- generate-sources
-
- default
- ${project.build.directory}/custom-javah
- ${basedir}
- org_apache_mxnet_native_c_api.h
-
- org.apache.mxnet.LibInfo
-
-
-
- javah
-
-
-
-
-
-
- org.codehaus.mojo
- exec-maven-plugin
- 1.6.0
-
-
- post-native-build
- package
-
- exec
-
-
- install_name_tool
- -change lib/libmxnet.so @loader_path/libmxnet.so ${project.build.directory}/${artifactId}.jnilib
-
-
-
- link-native-lib
- generate-resources
-
- exec
-
-
- ln
- -sf ${MXNET_DIR}/lib/libmxnet.so ${project.build.directory}/libmxnet.so
-
-
-
-
-
-
-
diff --git a/scala-package/native/pom.xml b/scala-package/native/pom.xml
index 2f6425d21104..7b776d5b5171 100644
--- a/scala-package/native/pom.xml
+++ b/scala-package/native/pom.xml
@@ -5,46 +5,165 @@
4.0.0org.apache.mxnet
- mxnet-parent_2.11
- 1.5.0-SNAPSHOT
+ mxnet-parent
+ INTERNAL../pom.xml
- mxnet-scala-native-parent
- MXNet Scala Package - Native Parent
- pom
+ libmxnet-scala
+ MXNet Scala Package - Native
+ ${libtype}
+
+
+ ${project.parent.basedir}/..
+
- osx-x86_64-cpu
-
- osx-x86_64-cpu
-
-
-
- linux-x86_64-cpu
-
- linux-x86_64-cpu
-
+ osx-x86_64
+
+ mac
+
+
+
+
+ org.codehaus.mojo
+ native-maven-plugin
+ true
+
+ darwin
+ generic-classic
+ ${cxx}
+ ${cxx}
+
+
+
+
+ -std=c++0x
+
+
+ -I${MXNET_DIR}/include
+ -I${MXNET_DIR}/3rdparty/dmlc-core/include
+ -I${MXNET_DIR}/3rdparty/mshadow
+ -I${MXNET_DIR}/3rdparty/dlpack/include
+ -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
+ -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
+ -g -O0 -fPIC -msse3 -mf16c
+ -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
+
+
+ -shared
+
+
+ -framework JavaVM
+ -Wl,-exported_symbol,_Java_*
+ -Wl,-x
+
+
+ -Wl,-install_name,libmxnet-scala.jnilib -lmxnet -L${MXNET_DIR}/lib
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.6.0
+
+
+ post-native-build
+ package
+
+ exec
+
+
+ install_name_tool
+ -add_rpath @loader_path ${project.build.directory}/${project.artifactId}.jnilib
+
+
+
+
+
+
- linux-x86_64-gpu
-
- linux-x86_64-gpu
-
+ linux-x86_64
+
+
+ unix
+ Linux
+
+
+
+
+
+ org.codehaus.mojo
+ native-maven-plugin
+ true
+
+ linux
+ generic-classic
+ ${cxx}
+ ${cxx}
+
+
+
+
+ -std=c++0x
+
+
+ -I${MXNET_DIR}/include
+ -I${MXNET_DIR}/3rdparty/dmlc-core/include
+ -I${MXNET_DIR}/3rdparty/mshadow
+ -I${MXNET_DIR}/3rdparty/dlpack/include
+ -I${MXNET_DIR}/3rdparty/tvm/nnvm/include
+ -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0
+ -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c
+ -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
+
+
+ -shared
+
+
+ -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib
+
+
+
+
+
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.6.0
+
+
+ link-native-lib
+ generate-resources
+
+ exec
+
+
+ bash
+ -c 'ln -sf ${MXNET_DIR}/lib/* ${project.build.directory}/'
+
+
+
-
diff --git a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc
index 17d166eac345..ea6e9c8f5ba4 100644
--- a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc
+++ b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc
@@ -33,6 +33,7 @@
#include
#include
#include
+#include
#include "jni_helper_func.h"
JavaVM *_jvm;
@@ -423,6 +424,15 @@ JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyFromCPU
return ret;
}
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFloat64NDArraySyncCopyFromCPU
+ (JNIEnv *env, jobject obj, jlong arrayPtr, jdoubleArray sourceArr, jint arrSize) {
+ jdouble *sourcePtr = env->GetDoubleArrayElements(sourceArr, NULL);
+ int ret = MXNDArraySyncCopyFromCPU(reinterpret_cast(arrayPtr),
+ static_cast(sourcePtr), arrSize);
+ env->ReleaseDoubleArrayElements(sourceArr, sourcePtr, 0);
+ return ret;
+}
+
JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetContext
(JNIEnv *env, jobject obj, jlong arrayPtr, jobject devTypeId, jobject devId) {
int outDevType;
diff --git a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h
new file mode 100644
index 000000000000..7e8e03de9124
--- /dev/null
+++ b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h
@@ -0,0 +1,861 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include
+/* Header for class org_apache_mxnet_LibInfo */
+
+#ifndef _Included_org_apache_mxnet_LibInfo
+#define _Included_org_apache_mxnet_LibInfo
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: nativeLibInit
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_nativeLibInit
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxGetLastError
+ * Signature: ()Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_org_apache_mxnet_LibInfo_mxGetLastError
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxListAllOpNames
+ * Signature: (Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListAllOpNames
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: nnGetOpHandle
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_nnGetOpHandle
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxImperativeInvoke
+ * Signature: (J[J[JLscala/collection/mutable/ArrayBuffer;I[Ljava/lang/String;[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxImperativeInvoke
+ (JNIEnv *, jobject, jlong, jlongArray, jlongArray, jobject, jint, jobjectArray, jobjectArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayCreateNone
+ * Signature: (Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateNone
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayCreateEx
+ * Signature: ([IIIIIILorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateEx
+ (JNIEnv *, jobject, jintArray, jint, jint, jint, jint, jint, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayWaitAll
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayWaitAll
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayWaitToRead
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayWaitToRead
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxListFunctions
+ * Signature: (Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListFunctions
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxFuncDescribe
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncDescribe
+ (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxFuncGetInfo
+ * Signature: (JLorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncGetInfo
+ (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxFuncInvoke
+ * Signature: (J[J[F[J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncInvoke
+ (JNIEnv *, jobject, jlong, jlongArray, jfloatArray, jlongArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxFuncInvokeEx
+ * Signature: (J[J[F[JI[[B[[B)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncInvokeEx
+ (JNIEnv *, jobject, jlong, jlongArray, jfloatArray, jlongArray, jint, jobjectArray, jobjectArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayGetShape
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetShape
+ (JNIEnv *, jobject, jlong, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArraySyncCopyToCPU
+ * Signature: (J[BI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyToCPU
+ (JNIEnv *, jobject, jlong, jbyteArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArraySlice
+ * Signature: (JIILorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySlice
+ (JNIEnv *, jobject, jlong, jint, jint, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayAt
+ * Signature: (JILorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayAt
+ (JNIEnv *, jobject, jlong, jint, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayReshape
+ * Signature: (JI[ILorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayReshape
+ (JNIEnv *, jobject, jlong, jint, jintArray, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArraySyncCopyFromCPU
+ * Signature: (J[FI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyFromCPU
+ (JNIEnv *, jobject, jlong, jfloatArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxFloat64NDArraySyncCopyFromCPU
+ * Signature: (J[DI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFloat64NDArraySyncCopyFromCPU
+ (JNIEnv *, jobject, jlong, jdoubleArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayLoad
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayLoad
+ (JNIEnv *, jobject, jstring, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArraySave
+ * Signature: (Ljava/lang/String;[J[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySave
+ (JNIEnv *, jobject, jstring, jlongArray, jobjectArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayGetContext
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetContext
+ (JNIEnv *, jobject, jlong, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArraySaveRawBytes
+ * Signature: (JLscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySaveRawBytes
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayLoadFromRawBytes
+ * Signature: ([BLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayLoadFromRawBytes
+ (JNIEnv *, jobject, jbyteArray, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNDArrayGetDType
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetDType
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxInitPSEnv
+ * Signature: ([Ljava/lang/String;[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxInitPSEnv
+ (JNIEnv *, jobject, jobjectArray, jobjectArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreRunServer
+ * Signature: (JLorg/apache/mxnet/KVServerControllerCallback;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreRunServer
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreGetNumDeadNode
+ * Signature: (JILorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetNumDeadNode
+ (JNIEnv *, jobject, jlong, jint, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreCreate
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreCreate
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreInit
+ * Signature: (JI[I[J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreInit
+ (JNIEnv *, jobject, jlong, jint, jintArray, jlongArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreInitEx
+ * Signature: (JI[Ljava/lang/String;[J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreInitEx
+ (JNIEnv *, jobject, jlong, jint, jobjectArray, jlongArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStorePush
+ * Signature: (JI[I[JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePush
+ (JNIEnv *, jobject, jlong, jint, jintArray, jlongArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStorePushEx
+ * Signature: (JI[Ljava/lang/String;[JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePushEx
+ (JNIEnv *, jobject, jlong, jint, jobjectArray, jlongArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStorePull
+ * Signature: (JI[I[JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePull
+ (JNIEnv *, jobject, jlong, jint, jintArray, jlongArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStorePullEx
+ * Signature: (JI[Ljava/lang/String;[JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePullEx
+ (JNIEnv *, jobject, jlong, jint, jobjectArray, jlongArray, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreSetUpdater
+ * Signature: (JLorg/apache/mxnet/MXKVStoreUpdater;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSetUpdater
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreIsWorkerNode
+ * Signature: (Lorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreIsWorkerNode
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreGetType
+ * Signature: (JLorg/apache/mxnet/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetType
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreSendCommmandToServers
+ * Signature: (JILjava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSendCommmandToServers
+ (JNIEnv *, jobject, jlong, jint, jstring);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreBarrier
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreBarrier
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreGetGroupSize
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetGroupSize
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreGetRank
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetRank
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreSetBarrierBeforeExit
+ * Signature: (JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSetBarrierBeforeExit
+ (JNIEnv *, jobject, jlong, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxKVStoreFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxListDataIters
+ * Signature: (Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListDataIters
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterCreateIter
+ * Signature: (J[Ljava/lang/String;[Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterCreateIter
+ (JNIEnv *, jobject, jlong, jobjectArray, jobjectArray, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterGetIterInfo
+ * Signature: (JLorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefString;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetIterInfo
+ (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterBeforeFirst
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterBeforeFirst
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterNext
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterNext
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterGetLabel
+ * Signature: (JLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetLabel
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterGetData
+ * Signature: (JLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetData
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterGetIndex
+ * Signature: (JLscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetIndex
+ (JNIEnv *, jobject, jlong, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDataIterGetPadNum
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetPadNum
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorOutputs
+ * Signature: (JLscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorOutputs
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorForward
+ * Signature: (JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorForward
+ (JNIEnv *, jobject, jlong, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorBackward
+ * Signature: (J[J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBackward
+ (JNIEnv *, jobject, jlong, jlongArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorPrint
+ * Signature: (JLorg/apache/mxnet/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorPrint
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorSetMonitorCallback
+ * Signature: (JLorg/apache/mxnet/MXMonitorCallback;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorSetMonitorCallback
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolListAtomicSymbolCreators
+ * Signature: (Lscala/collection/mutable/ListBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAtomicSymbolCreators
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolGetAtomicSymbolInfo
+ * Signature: (JLorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetAtomicSymbolInfo
+ (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCreateAtomicSymbol
+ * Signature: (J[Ljava/lang/String;[Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateAtomicSymbol
+ (JNIEnv *, jobject, jlong, jobjectArray, jobjectArray, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolSetAttr
+ * Signature: (JLjava/lang/String;Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSetAttr
+ (JNIEnv *, jobject, jlong, jstring, jstring);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolListAttrShallow
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAttrShallow
+ (JNIEnv *, jobject, jlong, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolListAttr
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAttr
+ (JNIEnv *, jobject, jlong, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCompose
+ * Signature: (JLjava/lang/String;[Ljava/lang/String;[J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCompose
+ (JNIEnv *, jobject, jlong, jstring, jobjectArray, jlongArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCreateVariable
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateVariable
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolGetAttr
+ * Signature: (JLjava/lang/String;Lorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetAttr
+ (JNIEnv *, jobject, jlong, jstring, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolListArguments
+ * Signature: (JLscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListArguments
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCopy
+ * Signature: (JLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCopy
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolListAuxiliaryStates
+ * Signature: (JLscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAuxiliaryStates
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolListOutputs
+ * Signature: (JLscala/collection/mutable/ArrayBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListOutputs
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCreateGroup
+ * Signature: ([JLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateGroup
+ (JNIEnv *, jobject, jlongArray, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolPrint
+ * Signature: (JLorg/apache/mxnet/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolPrint
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolGetInternals
+ * Signature: (JLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetInternals
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolInferType
+ * Signature: (J[Ljava/lang/String;[ILscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferType
+ (JNIEnv *, jobject, jlong, jobjectArray, jintArray, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolInferShape
+ * Signature: (JI[Ljava/lang/String;[I[ILscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferShape
+ (JNIEnv *, jobject, jlong, jint, jobjectArray, jintArray, jintArray, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolGetOutput
+ * Signature: (JILorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetOutput
+ (JNIEnv *, jobject, jlong, jint, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolSaveToJSON
+ * Signature: (JLorg/apache/mxnet/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSaveToJSON
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCreateFromJSON
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateFromJSON
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorBindX
+ * Signature: (JIII[Ljava/lang/String;[I[II[J[J[I[JLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBindX
+ (JNIEnv *, jobject, jlong, jint, jint, jint, jobjectArray, jintArray, jintArray, jint, jlongArray, jlongArray, jintArray, jlongArray, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxExecutorBindEX
+ * Signature: (JIII[Ljava/lang/String;[I[II[J[J[I[JJLorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBindEX
+ (JNIEnv *, jobject, jlong, jint, jint, jint, jobjectArray, jintArray, jintArray, jint, jlongArray, jlongArray, jintArray, jlongArray, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolSaveToFile
+ * Signature: (JLjava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSaveToFile
+ (JNIEnv *, jobject, jlong, jstring);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolCreateFromFile
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateFromFile
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSymbolFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRandomSeed
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRandomSeed
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxNotifyShutdown
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNotifyShutdown
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOWriterCreate
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterCreate
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOReaderCreate
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderCreate
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOWriterFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOReaderFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOWriterWriteRecord
+ * Signature: (JLjava/lang/String;I)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterWriteRecord
+ (JNIEnv *, jobject, jlong, jstring, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOReaderReadRecord
+ * Signature: (JLorg/apache/mxnet/Base/RefString;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderReadRecord
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOWriterTell
+ * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterTell
+ (JNIEnv *, jobject, jlong, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRecordIOReaderSeek
+ * Signature: (JI)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderSeek
+ (JNIEnv *, jobject, jlong, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRtcCreate
+ * Signature: (Ljava/lang/String;[Ljava/lang/String;[Ljava/lang/String;[J[JLjava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcCreate
+ (JNIEnv *, jobject, jstring, jobjectArray, jobjectArray, jlongArray, jlongArray, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRtcPush
+ * Signature: (J[J[JIIIIII)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcPush
+ (JNIEnv *, jobject, jlong, jlongArray, jlongArray, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxRtcFree
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcFree
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxCustomOpRegister
+ * Signature: (Ljava/lang/String;Lorg/apache/mxnet/CustomOpProp;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister
+ (JNIEnv *, jobject, jstring, jobject);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSetProfilerConfig
+ * Signature: ([Ljava/lang/String;[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetProfilerConfig
+ (JNIEnv *, jobject, jobjectArray, jobjectArray);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxSetProfilerState
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetProfilerState
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_apache_mxnet_LibInfo
+ * Method: mxDumpProfile
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDumpProfile
+ (JNIEnv *, jobject, jint);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/scala-package/packageTest/Makefile b/scala-package/packageTest/Makefile
index 6073ff8a722f..8c12c1d04189 100644
--- a/scala-package/packageTest/Makefile
+++ b/scala-package/packageTest/Makefile
@@ -43,6 +43,10 @@ else
endif
endif
+ifeq ($(CI), 1)
+ MAVEN_ARGS := -B
+endif
+
PROFILES := -Ptest
ifeq ($(UNIT), 1)
PROFILES := "$(PROFILES),unittest"
@@ -59,27 +63,27 @@ endif
clean:
- (mvn clean -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
+ (mvn $(MAVEN_ARGS) clean -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
-Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \
-Dmxnet.version=$(MXNET_VERSION) \
-Dscala.version=$(SCALA_VERSION))
testinstall:
- (mvn integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
+ (mvn $(MAVEN_ARGS) integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
$(PROFILES) \
-Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \
-Dmxnet.version=$(MXNET_VERSION) \
-Dscala.version=$(SCALA_VERSION))
testlocal:
- (mvn integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
+ (mvn $(MAVEN_ARGS) integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
$(PROFILES),fromLocal \
-Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \
-Dmxnet.version=$(MXNET_VERSION) \
-Dscala.version=$(SCALA_VERSION))
testsnapshot:
- (mvn integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
+ (mvn $(MAVEN_ARGS) integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
$(PROFILES),fromSnapshots \
-Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \
-Dmxnet.repo=$(MXNET_REPO) \
diff --git a/scala-package/packageTest/pom.xml b/scala-package/packageTest/pom.xml
index 9c5c11cf2779..f7d9e3b180bc 100644
--- a/scala-package/packageTest/pom.xml
+++ b/scala-package/packageTest/pom.xml
@@ -42,7 +42,7 @@
local-snapshot
- file://${basedir}/../local-snapshot
+ file://${basedir}/../deploy/target/repotrue
diff --git a/scala-package/pom.xml b/scala-package/pom.xml
index 6eb573bf3e23..6665e953dcd1 100644
--- a/scala-package/pom.xml
+++ b/scala-package/pom.xml
@@ -6,11 +6,12 @@
org.apacheapache
- 19
+ 19
+
org.apache.mxnet
- mxnet-parent_2.11
- 1.5.0-SNAPSHOT
+ mxnet-parent
+ INTERNALMXNet Scala Package - Parent/~https://github.com/apache/incubator-mxnet/tree/master/scala-package
@@ -37,10 +38,11 @@
2.11.8
- 2.11
-
+ g++$
+ ${project.basedir}/..
+ truepom
@@ -48,46 +50,18 @@
initinit-nativemacros
- corenative
+ coreinferexamplessparkassembly
+ deploy
-
- release
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
-
- generate-sources
-
- add-source
-
-
-
-
-
-
-
-
-
-
-
-
-
scala-2.11
-
- 2.11.8
- 2.11
-
@@ -117,31 +91,66 @@
- scala-2.12
+ osx-x86_64
+
+
+ mac
+
+
+
+ osx-x86_64
+ jnilib
+ cpu
+
+
+
+ linux-x86_64
+
+
+ unix
+ Linux
+
+
- 2.12.4
- 2.12
+ linux-x86_64
+ so
+
- org.apache.maven.plugins
- maven-enforcer-plugin
+ org.codehaus.mojo
+ exec-maven-plugin
+ 1.6.0
- enforce-versions
+ init-build-flavor
+ initialize
- enforce
+ exec
-
-
-
- *:*_2.11
- *:*_2.10
-
-
-
+ bash
+ -c 'mkdir -p ${project.build.directory}; if [[ $(ldd ${MXNET_DIR}/lib/libmxnet.so | grep libcuda.so | wc -l) == "0" ]]; then echo flavor=cpu > ${project.build.directory}/flavor.properties; else echo flavor=gpu > ${project.build.directory}/flavor.properties; fi'
+
+
+
+
+
+ org.codehaus.mojo
+ properties-maven-plugin
+ 1.0.0
+
+
+ read-properties
+ initialize
+
+ read-project-properties
+
+
+
+ ${project.build.directory}/flavor.properties
+
@@ -154,19 +163,25 @@
- org.apache.maven.plugins
- maven-release-plugin
-
- true
- false
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
-
- true
-
+ org.commonjava.maven.plugins
+ directory-maven-plugin
+ 0.1
+
+
+ directories
+
+ directory-of
+
+ initialize
+
+ rootdir
+
+ org.apache.mxnet
+ mxnet-parent
+
+
+
+ org.apache.maven.plugins
@@ -209,14 +224,17 @@
org.apache.maven.pluginsmaven-assembly-plugin
- 2.5.5
+ 3.1.0org.apache.maven.pluginsmaven-surefire-plugin
- 2.19
+ 2.22.0
- true
+ ${skipJavaTests}
+
+ -Djava.library.path=${project.parent.basedir}/native/target
+ false
@@ -231,7 +249,6 @@
scalatest-maven-plugin1.0
- ${skipTests}${project.build.directory}/surefire-reports.F
@@ -256,7 +273,7 @@
org.scalastylescalastyle-maven-plugin
- 0.8.0
+ 1.0.0falsetrue
@@ -264,7 +281,7 @@
false${basedir}/src/main/scala${basedir}/src/test/scala
- scalastyle-config.xml
+ ${rootdir}/scalastyle-config.xml${basedir}/target/scalastyle-output.xmlUTF-8
@@ -315,19 +332,17 @@
+
+
+ org.apache.maven.plugins
+ maven-deploy-plugin
+
+ true
+
+
-
- org.scala-lang
- scala-library
- ${scala.version}
-
-
- org.scala-lang
- scala-reflect
- ${scala.version}
- commons-codeccommons-codec
@@ -352,7 +367,7 @@
org.scalatest
- scalatest_${scala.binary.version}
+ scalatest_2.113.0.4test
@@ -363,13 +378,25 @@
org.scalacheck
- scalacheck_${scala.binary.version}
+ scalacheck_2.111.13.5test
+
+
+
+ org.scala-lang
+ scala-library
+ ${scala.version}
+
+
+ org.scala-lang
+ scala-reflect
+ ${scala.version}
+ org.scala-lang.modules
- scala-parser-combinators_${scala.binary.version}
+ scala-parser-combinators_2.111.0.4
diff --git a/scala-package/spark/README.md b/scala-package/spark/README.md
index 06106648c059..503c279038a5 100644
--- a/scala-package/spark/README.md
+++ b/scala-package/spark/README.md
@@ -16,7 +16,8 @@ Checkout the [Installation Guide](http://mxnet.io/get_started/setup.html) contai
Compile the Scala Package by
```bash
-make scalapkg
+cd scala-package
+mvn package
```
This will automatically build the `spark` submodule. Now you can submit Spark job with these built jars.
diff --git a/scala-package/spark/bin/run-mnist-example.sh b/scala-package/spark/bin/run-mnist-example.sh
index 4ebd6c61d56b..4f747f2c91a1 100755
--- a/scala-package/spark/bin/run-mnist-example.sh
+++ b/scala-package/spark/bin/run-mnist-example.sh
@@ -27,9 +27,9 @@ OS=""
if [ "$(uname)" == "Darwin" ]; then
# Do something under Mac OS X platform
- OS='osx-x86_64-cpu'
+ OS='osx-x86_64'
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
- OS='linux-x86_64-cpu'
+ OS='linux-x86_64'
fi
LIB_DIR=${SPARK_MODULE_DIR}/target/classes/lib
diff --git a/scala-package/spark/pom.xml b/scala-package/spark/pom.xml
index 2db3bee8c78d..f2737e9334f4 100644
--- a/scala-package/spark/pom.xml
+++ b/scala-package/spark/pom.xml
@@ -5,47 +5,28 @@
4.0.0org.apache.mxnet
- mxnet-parent_2.11
- 1.5.0-SNAPSHOT
+ mxnet-parent
+ INTERNAL../pom.xml
- mxnet-spark_2.11
+ mxnet-sparkMXNet Scala Package - Spark ML1.6.3
-
-
- osx-x86_64-cpu
-
- osx-x86_64-cpu
-
-
-
- linux-x86_64-cpu
-
- linux-x86_64-cpu
-
-
-
- linux-x86_64-gpu
-
- linux-x86_64-gpu
-
-
-
+
org.apache.mxnet
- mxnet-core_${scala.binary.version}
- 1.5.0-SNAPSHOT
+ mxnet-core
+ INTERNALprovidedorg.apache.spark
- spark-mllib_${scala.binary.version}
+ spark-mllib_2.11${spark.version}
diff --git a/snapcraft.yaml b/snapcraft.yaml
index d8d0e301e6b1..9791cd86fc0e 100644
--- a/snapcraft.yaml
+++ b/snapcraft.yaml
@@ -1,3 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
name: mxnet
version: '1.5.0'
summary: MXNet is a deep learning framework designed for efficiency and flexibility.
diff --git a/src/c_api/c_api_common.h b/src/c_api/c_api_common.h
index 079b587e9965..ecb05bc78ca4 100644
--- a/src/c_api/c_api_common.h
+++ b/src/c_api/c_api_common.h
@@ -29,37 +29,29 @@
#include
#include
#include
+#include
#include
#include
#include
#include
-/*! \brief macro to guard beginning and end section of all functions */
-#define API_BEGIN() try { on_enter_api(__FUNCTION__);
-/*! \brief every function starts with API_BEGIN();
- and finishes with API_END() or API_END_HANDLE_ERROR */
-#define API_END() } catch(dmlc::Error &_except_) { on_exit_api(); return MXAPIHandleException(_except_); } on_exit_api(); return 0; // NOLINT(*)
/*!
- * \brief every function starts with API_BEGIN();
- * and finishes with API_END() or API_END_HANDLE_ERROR
- * The finally clause contains procedure to cleanup states when an error happens.
+ * \brief Macros to guard beginning and end section of all functions
+ * every function starts with API_BEGIN()
+ * and finishes with API_END() or API_END_HANDLE_ERROR()
+ * The finally clause contains procedure to cleanup states when an error happens.
*/
-#define API_END_HANDLE_ERROR(Finalize) } catch(dmlc::Error &_except_) { Finalize; on_exit_api(); return MXAPIHandleException(_except_); } on_exit_api(); return 0; // NOLINT(*)
+#ifndef API_BEGIN
+#define API_BEGIN MX_API_BEGIN
+#endif
-/*!
- * \brief Set the last error message needed by C API
- * \param msg The error message to set.
- */
-void MXAPISetLastError(const char* msg);
-/*!
- * \brief handle exception throwed out
- * \param e the exception
- * \return the return value of API after exception is handled
- */
-inline int MXAPIHandleException(const dmlc::Error &e) {
- MXAPISetLastError(e.what());
- return -1;
-}
+#ifndef API_END
+#define API_END MX_API_END
+#endif
+
+#ifndef API_END_HANDLE_ERROR
+#define API_END_HANDLE_ERROR MX_API_END_HANDLE_ERROR
+#endif
using namespace mxnet;
@@ -137,10 +129,6 @@ inline void CopyAttr(const nnvm::IndexedGraph& idx,
// stores keys that will be converted to __key__
extern const std::vector kHiddenKeys;
-
-extern void on_enter_api(const char *function);
-extern void on_exit_api();
-
} // namespace mxnet
#endif // MXNET_C_API_C_API_COMMON_H_
diff --git a/src/c_api/c_api_executor.cc b/src/c_api/c_api_executor.cc
index 1f936b164326..e2e53c7261fa 100644
--- a/src/c_api/c_api_executor.cc
+++ b/src/c_api/c_api_executor.cc
@@ -148,8 +148,6 @@ int MXExecutorBindEX(SymbolHandle symbol_handle,
NDArrayHandle *aux_states,
ExecutorHandle shared_exec,
ExecutorHandle *out) {
- Executor* exec = nullptr;
-
API_BEGIN();
nnvm::Symbol *symb = static_cast(symbol_handle);
Context ctx = Context::Create(static_cast(dev_type), dev_id);
@@ -181,7 +179,7 @@ int MXExecutorBindEX(SymbolHandle symbol_handle,
*out = Executor::Bind(*symb, ctx, ctx_map, in_args_vec,
arg_grad_vec, grad_req_vec, aux_states_vec,
reinterpret_cast(shared_exec));
- API_END_HANDLE_ERROR(delete exec);
+ API_END();
}
/*!
@@ -558,8 +556,11 @@ int MXExecutorReshape(int partial_shaping,
NDArrayHandle** aux_states,
ExecutorHandle shared_exec,
ExecutorHandle *out) {
+ Executor* new_exec = nullptr;
+
MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
API_BEGIN();
+ *out = nullptr; // ensure we can know whether to free executor on early abort
// create shape map for in_args and aux_states
std::unordered_map kwargs(num_provided_arg_shapes);
for (mx_uint i = 0; i < num_provided_arg_shapes; ++i) {
@@ -581,8 +582,9 @@ int MXExecutorReshape(int partial_shaping,
std::vector aux_state_vec;
Executor* exec = static_cast(shared_exec);
- *out = exec->Reshape(partial_shaping, allow_up_sizing, ctx, ctx_map, kwargs,
+ new_exec = exec->Reshape(partial_shaping, allow_up_sizing, ctx, ctx_map, kwargs,
&in_arg_vec, &arg_grad_vec, &aux_state_vec);
+ *out = new_exec;
ret->ret_handles.clear();
ret->ret_handles.reserve(in_arg_vec.size()+arg_grad_vec.size()+aux_state_vec.size());
@@ -623,7 +625,7 @@ int MXExecutorReshape(int partial_shaping,
*aux_states = &(ret->ret_handles[nd_idx]);
nd_idx = ret->ret_handles.size();
}
- API_END_HANDLE_ERROR(delete out);
+ API_END_HANDLE_ERROR(delete new_exec);
}
int MXExecutorGetOptimizedSymbol(ExecutorHandle handle,
diff --git a/src/common/cuda_utils.h b/src/common/cuda_utils.h
index 047edde88a53..0dd9d2db3722 100644
--- a/src/common/cuda_utils.h
+++ b/src/common/cuda_utils.h
@@ -286,22 +286,35 @@ inline DType __device__ CudaMin(DType a, DType b) {
class DeviceStore {
public:
/*! \brief default constructor- only optionally restores previous device */
- explicit DeviceStore(bool restore = true) : restore_(restore) {
+ explicit DeviceStore(int requested_device = -1, bool restore = true) :
+ restore_device_(-1),
+ current_device_(requested_device),
+ restore_(restore) {
if (restore_)
CUDA_CALL(cudaGetDevice(&restore_device_));
+ if (requested_device != restore_device_) {
+ SetDevice(requested_device);
+ }
}
~DeviceStore() {
- if (restore_)
+ if (restore_ &&
+ current_device_ != restore_device_ &&
+ current_device_ != -1 &&
+ restore_device_ != -1)
CUDA_CALL(cudaSetDevice(restore_device_));
}
void SetDevice(int device) {
- CUDA_CALL(cudaSetDevice(device));
+ if (device != -1) {
+ CUDA_CALL(cudaSetDevice(device));
+ current_device_ = device;
+ }
}
private:
int restore_device_;
+ int current_device_;
bool restore_;
};
diff --git a/src/engine/stream_manager.h b/src/engine/stream_manager.h
index d4ac042ff401..516e04bf5e82 100644
--- a/src/engine/stream_manager.h
+++ b/src/engine/stream_manager.h
@@ -65,9 +65,6 @@ template
RunContext StreamManager::GetRunContext(
Context const& ctx) {
RunContext ret;
-#if MXNET_USE_CUDA
- mxnet::common::cuda::DeviceStore device_store;
-#endif
switch (ctx.dev_mask()) {
case cpu::kDevMask:
ret = RunContext{ctx, nullptr};
@@ -75,11 +72,11 @@ RunContext StreamManager::GetRunContext(
case gpu::kDevMask: {
#if MXNET_USE_CUDA
std::size_t use_counter;
- device_store.SetDevice(ctx.dev_id);
{
std::lock_guard lock{mutex_};
auto&& counter = gpu_cnt_.at(ctx.dev_id);
if (counter == -1) {
+ mxnet::common::cuda::DeviceStore device_store(ctx.dev_id);
for (auto&& i : gpu_streams_.at(ctx.dev_id)) {
i = mshadow::NewStream(true, MXNET_USE_CUDNN != 0, ctx.dev_id);
}
@@ -104,19 +101,16 @@ template
RunContext StreamManager::GetIORunContext(
Context const& ctx) {
RunContext ret;
-#if MXNET_USE_CUDA
- mxnet::common::cuda::DeviceStore device_store;
-#endif
switch (ctx.dev_mask()) {
case cpu::kDevMask:
ret = RunContext{ctx, nullptr};
break;
case gpu::kDevMask: {
#if MXNET_USE_CUDA
- device_store.SetDevice(ctx.dev_id);
{
std::lock_guard lock{mutex_};
if (gpu_io_streams_.at(ctx.dev_id) == nullptr) {
+ mxnet::common::cuda::DeviceStore device_store(ctx.dev_id);
gpu_io_streams_.at(ctx.dev_id) = mshadow::NewStream(false, false, ctx.dev_id);
}
}
diff --git a/src/engine/threaded_engine_pooled.cc b/src/engine/threaded_engine_pooled.cc
index 1abb82fd6a67..c6eb99508e09 100644
--- a/src/engine/threaded_engine_pooled.cc
+++ b/src/engine/threaded_engine_pooled.cc
@@ -31,6 +31,9 @@
#include "./threaded_engine.h"
#include "./thread_pool.h"
#include "./stream_manager.h"
+#if MXNET_USE_CUDA
+#include "../common/cuda_utils.h"
+#endif
namespace mxnet {
namespace engine {
@@ -130,10 +133,13 @@ class ThreadedEnginePooled : public ThreadedEngine {
* \param opr_block The operator block.
*/
void DoExecute(OprBlock* opr_block) {
+#if MXNET_USE_CUDA
+ mxnet::common::cuda::DeviceStore device_store(-1, false);
+#endif
assert(opr_block->wait.load() == 0);
if (opr_block->ctx.dev_mask() == gpu::kDevMask) {
#if MXNET_USE_CUDA
- CUDA_CALL(cudaSetDevice(opr_block->ctx.dev_id));
+ device_store.SetDevice(opr_block->ctx.dev_id);
#else // MXNET_USE_CUDA
LOG(FATAL) << "Please compile with CUDA enabled";
#endif // MXNET_USE_CUDA
diff --git a/src/executor/tensorrt_pass.cc b/src/executor/tensorrt_pass.cc
index b5fc8d15f7ac..d26704c35cf5 100644
--- a/src/executor/tensorrt_pass.cc
+++ b/src/executor/tensorrt_pass.cc
@@ -324,10 +324,10 @@ nnvm::NodePtr ConvertNnvmGraphToOnnx(const nnvm::Graph &g,
std::unordered_map* const params_map) {
auto p = nnvm::Node::Create();
p->attrs.op = nnvm::Op::Get("_trt_op");
- op::TRTParam trt_param = op::nnvm_to_onnx::ConvertNnvmGraphToOnnx(g, params_map);
- p->attrs.dict["serialized_output_map"] = trt_param.serialized_output_map;
- p->attrs.dict["serialized_input_map"] = trt_param.serialized_input_map;
- p->attrs.dict["serialized_onnx_graph"] = trt_param.serialized_onnx_graph;
+ op::ONNXParam onnx_param = op::nnvm_to_onnx::ConvertNnvmGraphToOnnx(g, params_map);
+ p->attrs.dict["serialized_output_map"] = onnx_param.serialized_output_map;
+ p->attrs.dict["serialized_input_map"] = onnx_param.serialized_input_map;
+ p->attrs.dict["serialized_onnx_graph"] = onnx_param.serialized_onnx_graph;
if (p->op()->attr_parser != nullptr) {
p->op()->attr_parser(&(p->attrs));
}
diff --git a/src/executor/trt_graph_executor.cc b/src/executor/trt_graph_executor.cc
index 65dbb29792e0..ec35fee98a96 100644
--- a/src/executor/trt_graph_executor.cc
+++ b/src/executor/trt_graph_executor.cc
@@ -407,14 +407,7 @@ nnvm::Symbol TrtGraphExecutor::GetOptimizedSymbol() {
Symbol ret;
ret.outputs = std::vector(graph_.outputs.begin(),
graph_.outputs.begin() + num_forward_outputs_);
- ret = ret.Copy();
- static const Op* trt_op = Op::Get("_trt_op");
- DFSVisit(ret.outputs, [](const nnvm::NodePtr n) {
- if (n->op() == trt_op) {
- n->attrs.dict.clear();
- }
- });
- return ret;
+ return ret.Copy();
}
Executor *TrtGraphExecutor::TensorRTBind(nnvm::Symbol symbol,
diff --git a/src/initialize.cc b/src/initialize.cc
index ddda3f18a3ae..de7edd1b1455 100644
--- a/src/initialize.cc
+++ b/src/initialize.cc
@@ -57,11 +57,13 @@ class LibraryInitializer {
Engine::Get()->Start();
},
[]() {
- // Make children single threaded since they are typically workers
- dmlc::SetEnv("MXNET_CPU_WORKER_NTHREADS", 1);
+ // Conservative thread management for multiprocess workers
+ const size_t mp_worker_threads = dmlc::GetEnv("MXNET_MP_WORKER_NTHREADS", 1);
+ dmlc::SetEnv("MXNET_CPU_WORKER_NTHREADS", mp_worker_threads);
dmlc::SetEnv("OMP_NUM_THREADS", 1);
#if MXNET_USE_OPENCV && !__APPLE__
- cv::setNumThreads(0); // disable opencv threading
+ const size_t mp_cv_num_threads = dmlc::GetEnv("MXNET_MP_OPENCV_NUM_THREADS", 0);
+ cv::setNumThreads(mp_cv_num_threads); // disable opencv threading
#endif // MXNET_USE_OPENCV
engine::OpenMP::Get()->set_enabled(false);
Engine::Get()->Start();
diff --git a/src/kvstore/comm.h b/src/kvstore/comm.h
index 7090aaf46d8f..08f6155cb5b4 100644
--- a/src/kvstore/comm.h
+++ b/src/kvstore/comm.h
@@ -724,10 +724,9 @@ class CommDevice : public Comm {
int enabled = 0;
std::vector p2p(n*n);
- // Restores active device to what it was before EnableP2P
- mxnet::common::cuda::DeviceStore device_store;
for (int i = 0; i < n; ++i) {
- device_store.SetDevice(gpus[i]);
+ // Restores active device to what it was before EnableP2P
+ mxnet::common::cuda::DeviceStore device_store(gpus[i]);
for (int j = 0; j < n; j++) {
int access;
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
diff --git a/src/kvstore/comm_tree.h b/src/kvstore/comm_tree.h
index e3b2ad7f57d3..b62228cd2885 100644
--- a/src/kvstore/comm_tree.h
+++ b/src/kvstore/comm_tree.h
@@ -339,9 +339,8 @@ class CommDeviceTree : public CommDevice {
int n = static_cast(gpus.size());
int enabled = 0;
std::vector p2p(n*n);
- mxnet::common::cuda::DeviceStore device_store;
for (int i = 0; i < n; ++i) {
- device_store.SetDevice(gpus[i]);
+ mxnet::common::cuda::DeviceStore device_store(gpus[i]);
for (int j = 0; j < n; j++) {
int access;
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
diff --git a/src/ndarray/ndarray.cc b/src/ndarray/ndarray.cc
index 081d4e759323..251bfb3f0e1f 100644
--- a/src/ndarray/ndarray.cc
+++ b/src/ndarray/ndarray.cc
@@ -330,11 +330,10 @@ struct NDArrayDLManager {
};
DLManagedTensor* NDArray::ToDLPack() const {
+ CHECK(!is_none()) << "NDArray is not initialized";
NDArrayDLManager* dlmanager(new NDArrayDLManager);
dlmanager->handle = *this;
- if (!is_none()) {
- dlmanager->tensor.dl_tensor = data().dltensor();
- }
+ dlmanager->tensor.dl_tensor = dlmanager->handle.data().dltensor();
dlmanager->tensor.manager_ctx = dlmanager;
dlmanager->tensor.deleter = [](DLManagedTensor* dlmanager){
delete static_cast(dlmanager->manager_ctx);
@@ -454,17 +453,10 @@ void NDArray::Chunk::SetMKLMem(const TShape &shape, int dtype) {
mkldnn::memory::dims dims;
// These are shapes supprted by MKLDNN.
- if (shape.ndim() == 1 || shape.ndim() == 2 || shape.ndim() == 4
- || shape.ndim() == 5) {
+ if (shape.ndim() >= 1 && shape.ndim() <= 5) {
dims.resize(shape.ndim());
for (size_t i = 0; i < dims.size(); i++)
dims[i] = shape[i];
- } else if (shape.ndim() == 3) {
- // If there are 3 dimensions, we'll force it to 4 dimensions.
- dims.resize(shape.ndim() + 1);
- dims[0] = 1;
- for (size_t i = 0; i < shape.ndim(); i++)
- dims[i + 1] = shape[i];
} else {
LOG(FATAL) << "MKLDNN doesn't support " << shape.ndim() << " dimensions";
}
@@ -472,6 +464,7 @@ void NDArray::Chunk::SetMKLMem(const TShape &shape, int dtype) {
switch (dims.size()) {
case 1: layout = mkldnn::memory::format::x; break;
case 2: layout = mkldnn::memory::format::nc; break;
+ case 3: layout = mkldnn::memory::format::ncw; break;
case 4: layout = mkldnn::memory::format::nchw; break;
// This isn't the right layout when the data has 5 dimensions in MXNet.
// MXNet interprets 5 dimensions as ncdhw, but MKLDNN doesn't have
diff --git a/src/operator/c_lapack_api.cc b/src/operator/c_lapack_api.cc
new file mode 100644
index 000000000000..c6293bf8f684
--- /dev/null
+++ b/src/operator/c_lapack_api.cc
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "c_lapack_api.h"
+
+#if (MSHADOW_USE_MKL && MXNET_USE_LAPACK)
+#elif MXNET_USE_LAPACK
+#else
+ // use pragma message instead of warning
+ #pragma message("Warning: lapack usage not enabled, linalg-operators will not be available." \
+ " Ensure that lapack library is installed and build with USE_LAPACK=1 to get lapack" \
+ " functionalities.")
+
+ // Define compilable stubs.
+ #define MXNET_LAPACK_CWRAPPER1(func, dtype) \
+ int MXNET_LAPACK_##func(int matrix_layout, char uplo, int n, dtype* a, int lda) { \
+ LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
+ return 1; \
+ }
+
+ #define MXNET_LAPACK_CWRAPPER2(func, dtype) \
+ int MXNET_LAPACK_##func(int matrix_layout, int m, int n, dtype* a, \
+ int lda, dtype* tau, dtype* work, int lwork) { \
+ LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
+ return 1; \
+ }
+
+ #define MXNET_LAPACK_CWRAPPER3(func, dtype) \
+ int MXNET_LAPACK_##func(int matrix_layout, char uplo, int n, dtype *a, \
+ int lda, dtype *w, dtype *work, int lwork, \
+ int *iwork, int liwork) { \
+ LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
+ return 1; \
+ }
+
+ #define MXNET_LAPACK_UNAVAILABLE(func) \
+ int mxnet_lapack_##func(...) { \
+ LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
+ return 1; \
+ }
+ MXNET_LAPACK_CWRAPPER1(spotrf, float)
+ MXNET_LAPACK_CWRAPPER1(dpotrf, double)
+ MXNET_LAPACK_CWRAPPER1(spotri, float)
+ MXNET_LAPACK_CWRAPPER1(dpotri, double)
+
+ MXNET_LAPACK_UNAVAILABLE(sposv)
+ MXNET_LAPACK_UNAVAILABLE(dposv)
+
+ MXNET_LAPACK_CWRAPPER2(sgelqf, float)
+ MXNET_LAPACK_CWRAPPER2(dgelqf, double)
+ MXNET_LAPACK_CWRAPPER2(sorglq, float)
+ MXNET_LAPACK_CWRAPPER2(dorglq, double)
+
+ MXNET_LAPACK_CWRAPPER3(ssyevd, float)
+ MXNET_LAPACK_CWRAPPER3(dsyevd, double)
+#endif  // !(MSHADOW_USE_MKL && MXNET_USE_LAPACK) && !MXNET_USE_LAPACK
diff --git a/src/operator/c_lapack_api.h b/src/operator/c_lapack_api.h
index 46c8b963f429..cd69775547b4 100644
--- a/src/operator/c_lapack_api.h
+++ b/src/operator/c_lapack_api.h
@@ -324,42 +324,26 @@ inline void flip(int m, int n, DType *b, int ldb, DType *a, int lda) {
#else
- // use pragma message instead of warning
- #pragma message("Warning: lapack usage not enabled, linalg-operators will not be available." \
- " Ensure that lapack library is installed and build with USE_LAPACK=1 to get lapack" \
- " functionalities.")
+
#define MXNET_LAPACK_ROW_MAJOR 101
#define MXNET_LAPACK_COL_MAJOR 102
// Define compilable stubs.
#define MXNET_LAPACK_CWRAPPER1(func, dtype) \
- inline int MXNET_LAPACK_##func(int matrix_layout, char uplo, int n, dtype* a, int lda) { \
- LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
- return 1; \
- }
+ int MXNET_LAPACK_##func(int matrix_layout, char uplo, int n, dtype* a, int lda);
#define MXNET_LAPACK_CWRAPPER2(func, dtype) \
- inline int MXNET_LAPACK_##func(int matrix_layout, int m, int n, dtype* a, \
- int lda, dtype* tau, dtype* work, int lwork) { \
- LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
- return 1; \
- }
+ int MXNET_LAPACK_##func(int matrix_layout, int m, int n, dtype* a, \
+ int lda, dtype* tau, dtype* work, int lwork);
#define MXNET_LAPACK_CWRAPPER3(func, dtype) \
- inline int MXNET_LAPACK_##func(int matrix_layout, char uplo, int n, dtype *a, \
+ int MXNET_LAPACK_##func(int matrix_layout, char uplo, int n, dtype *a, \
int lda, dtype *w, dtype *work, int lwork, \
- int *iwork, int liwork) { \
- LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
- return 1; \
- }
+ int *iwork, int liwork);
#define MXNET_LAPACK_UNAVAILABLE(func) \
- inline int mxnet_lapack_##func(...) { \
- LOG(FATAL) << "MXNet build without lapack. Function " << #func << " is not available."; \
- return 1; \
- }
-
+ int mxnet_lapack_##func(...);
MXNET_LAPACK_CWRAPPER1(spotrf, float)
MXNET_LAPACK_CWRAPPER1(dpotrf, double)
MXNET_LAPACK_CWRAPPER1(spotri, float)
@@ -375,7 +359,10 @@ inline void flip(int m, int n, DType *b, int ldb, DType *a, int lda) {
MXNET_LAPACK_CWRAPPER3(ssyevd, float)
MXNET_LAPACK_CWRAPPER3(dsyevd, double)
-
+ #undef MXNET_LAPACK_CWRAPPER1
+ #undef MXNET_LAPACK_CWRAPPER2
+ #undef MXNET_LAPACK_CWRAPPER3
+ #undef MXNET_LAPACK_UNAVAILABLE
#endif
template
diff --git a/src/operator/contrib/adamw-inl.h b/src/operator/contrib/adamw-inl.h
new file mode 100644
index 000000000000..3d76b33ae765
--- /dev/null
+++ b/src/operator/contrib/adamw-inl.h
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2016 by Contributors
+ * \file adamw-inl.h
+ * \brief Optimizer operators
+ * \author Haibin Lin
+ */
+#ifndef MXNET_OPERATOR_CONTRIB_ADAMW_INL_H_
+#define MXNET_OPERATOR_CONTRIB_ADAMW_INL_H_
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../operator_common.h"
+#include "../mshadow_op.h"
+#include "../elemwise_op_common.h"
+#include "../mxnet_op.h"
+
+namespace mxnet {
+namespace op {
+
+struct AdamWParam : public dmlc::Parameter {
+ float lr;
+ float beta1;
+ float beta2;
+ float epsilon;
+ float wd;
+ float eta;
+ float rescale_grad;
+ float clip_gradient;
+ DMLC_DECLARE_PARAMETER(AdamWParam) {
+ DMLC_DECLARE_FIELD(lr)
+ .describe("Learning rate");
+ DMLC_DECLARE_FIELD(beta1)
+ .set_default(0.9f)
+ .describe("The decay rate for the 1st moment estimates.");
+ DMLC_DECLARE_FIELD(beta2)
+ .set_default(0.999f)
+ .describe("The decay rate for the 2nd moment estimates.");
+ DMLC_DECLARE_FIELD(epsilon)
+ .set_default(1e-8f)
+ .describe("A small constant for numerical stability.");
+ DMLC_DECLARE_FIELD(wd)
+ .set_default(0.0f)
+ .describe("Weight decay augments the objective function with a "
+ "regularization term that penalizes large weights. "
+ "The penalty scales with the square of the magnitude of each weight.");
+ DMLC_DECLARE_FIELD(eta)
+ .describe("Learning rate schedule multiplier");
+ DMLC_DECLARE_FIELD(rescale_grad)
+ .set_default(1.0f)
+ .describe("Rescale gradient to grad = rescale_grad*grad.");
+ DMLC_DECLARE_FIELD(clip_gradient)
+ .set_default(-1.0f)
+ .describe("Clip gradient to the range of [-clip_gradient, clip_gradient] "
+ "If clip_gradient <= 0, gradient clipping is turned off. "
+ "grad = max(min(grad, clip_gradient), -clip_gradient).");
+ }
+};
+
+/*
+ * \brief adam_w update.
+ */
+template
+inline void AdamWUpdate(const nnvm::NodeAttrs& attrs,
+ const OpContext &ctx,
+ const std::vector &inputs,
+ const std::vector &req,
+ const std::vector &outputs) {
+ using namespace mshadow;
+ using namespace mshadow::expr;
+ using namespace mshadow_op;
+ const AdamWParam& param = nnvm::get(attrs.parsed);
+ Stream* s = ctx.get_stream();
+ MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
+ Tensor weight = inputs[0].FlatTo2D(s);
+ Tensor grad = inputs[1].FlatTo2D(s);
+ Tensor mean = inputs[2].FlatTo2D(s);
+ Tensor var = inputs[3].FlatTo2D(s);
+ Tensor out = outputs[0].FlatTo2D(s);
+
+ grad = scalar(param.rescale_grad) * grad;
+ if (param.clip_gradient >= 0.0f) {
+ mean = scalar(param.beta1)*mean + scalar(1.f-param.beta1) *
+ F(grad, DType(param.clip_gradient));
+ var = scalar(param.beta2)*var + scalar(1.f-param.beta2)*F(
+ F(grad, DType(param.clip_gradient)));
+ } else {
+ mean = scalar(param.beta1)*mean + scalar(1.f-param.beta1) * grad;
+ var = scalar(param.beta2)*var + scalar(1.f-param.beta2) * F(grad);
+ }
+ Assign(out, req[0],
+ weight -
+ scalar(param.eta) * (scalar(param.lr) *
+ mean / (F(var) + scalar(param.epsilon)) +
+ (scalar(param.wd) * weight)));
+ });
+}
+
+} // namespace op
+} // namespace mxnet
+
+#endif // MXNET_OPERATOR_CONTRIB_ADAMW_INL_H_
diff --git a/src/operator/contrib/adamw.cc b/src/operator/contrib/adamw.cc
new file mode 100644
index 000000000000..94623fe08a9e
--- /dev/null
+++ b/src/operator/contrib/adamw.cc
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2016 by Contributors
+ * \file adamw.cc
+ * \brief Optimizer operators
+ * \author Haibin Lin
+ */
+#include "./adamw-inl.h"
+
+namespace mxnet {
+namespace op {
+
+DMLC_REGISTER_PARAMETER(AdamWParam);
+
+NNVM_REGISTER_OP(_contrib_adamw_update)
+.describe(R"code(Update function for AdamW optimizer. AdamW is seen as a modification of
+Adam by decoupling the weight decay from the optimization steps taken w.r.t. the loss function.
+
+Adam update consists of the following steps, where g represents gradient and m, v
+are 1st and 2nd order moment estimates (mean and variance).
+
+.. math::
+
+ g_t = \nabla J(W_{t-1})\\
+ m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\
+ v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\
+ W_t = W_{t-1} - \eta_t (\alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + wd W_{t-1})
+
+It updates the weights using::
+
+ m = beta1*m + (1-beta1)*grad
+ v = beta2*v + (1-beta2)*(grad**2)
+ w -= eta * (learning_rate * m / (sqrt(v) + epsilon) + w * wd)
+
+)code" ADD_FILELINE)
+.set_num_inputs(4)
+.set_num_outputs(1)
+.set_attr_parser(ParamParser)
+.set_attr("FInferShape", ElemwiseShape<4, 1>)
+.set_attr("FInferType", ElemwiseType<4, 1>)
+.set_attr("FMutateInputs",
+ [](const nnvm::NodeAttrs& attrs) {
+ return std::vector{2, 3};
+ })
+.set_attr("FCompute", AdamWUpdate)
+.add_argument("weight", "NDArray-or-Symbol", "Weight")
+.add_argument("grad", "NDArray-or-Symbol", "Gradient")
+.add_argument("mean", "NDArray-or-Symbol", "Moving mean")
+.add_argument("var", "NDArray-or-Symbol", "Moving variance")
+.add_arguments(AdamWParam::__FIELDS__());
+
+} // namespace op
+} // namespace mxnet
diff --git a/src/operator/contrib/adamw.cu b/src/operator/contrib/adamw.cu
new file mode 100644
index 000000000000..b7452f861e2d
--- /dev/null
+++ b/src/operator/contrib/adamw.cu
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2018 by Contributors
+ * \file adamw.cu
+ * \brief Optimizer operators
+ * \author Haibin Lin
+ */
+#include "./adamw-inl.h"
+
+namespace mxnet {
+namespace op {
+
+NNVM_REGISTER_OP(_contrib_adamw_update)
+.set_attr("FCompute", AdamWUpdate);
+
+} // namespace op
+} // namespace mxnet
diff --git a/src/operator/contrib/bounding_box-inl.h b/src/operator/contrib/bounding_box-inl.h
index 8e963461ec06..031dd952d386 100644
--- a/src/operator/contrib/bounding_box-inl.h
+++ b/src/operator/contrib/bounding_box-inl.h
@@ -785,7 +785,7 @@ void BipartiteMatchingForward(const nnvm::NodeAttrs& attrs,
.get_with_shape(Shape2(batch_size, col), s);
Shape<1> sort_index_shape = Shape1(dshape.Size());
index_t workspace_size = sort_index_shape.Size();
- workspace_size += ((sort_index_shape.Size() * sizeof(int32_t) - 1) / sizeof(DType)) * 2;
+ workspace_size += (sort_index_shape.Size() * 2 * sizeof(int32_t) - 1) / sizeof(DType) + 1;
Tensor workspace = ctx.requested[0]
.get_space_typed(Shape1(workspace_size), s);
Tensor scores_copy(workspace.dptr_,
diff --git a/src/operator/contrib/dgl_graph.cc b/src/operator/contrib/dgl_graph.cc
index 6d586755c957..a03cbef0b5ca 100644
--- a/src/operator/contrib/dgl_graph.cc
+++ b/src/operator/contrib/dgl_graph.cc
@@ -413,21 +413,6 @@ static bool CSRNeighborNonUniformSampleType(const nnvm::NodeAttrs& attrs,
return success;
}
-/*
- * Get src vertex and edge id for a destination vertex
- */
-static void GetSrcList(const dgl_id_t* val_list,
- const dgl_id_t* col_list,
- const dgl_id_t* indptr,
- const dgl_id_t dst_id,
- std::vector* src_list,
- std::vector* edge_list) {
- for (dgl_id_t i = *(indptr+dst_id); i < *(indptr+dst_id+1); ++i) {
- src_list->push_back(col_list[i]);
- edge_list->push_back(val_list[i]);
- }
-}
-
static void RandomSample(size_t set_size,
size_t num,
std::vector* out,
@@ -464,34 +449,34 @@ static void NegateSet(const std::vector &idxs,
/*
* Uniform sample
*/
-static void GetUniformSample(const std::vector& ver_list,
- const std::vector& edge_list,
+static void GetUniformSample(const dgl_id_t* val_list,
+ const dgl_id_t* col_list,
+ const size_t ver_len,
const size_t max_num_neighbor,
std::vector* out_ver,
std::vector* out_edge,
unsigned int* seed) {
- CHECK_EQ(ver_list.size(), edge_list.size());
// Copy ver_list to output
- if (ver_list.size() <= max_num_neighbor) {
- for (size_t i = 0; i < ver_list.size(); ++i) {
- out_ver->push_back(ver_list[i]);
- out_edge->push_back(edge_list[i]);
+ if (ver_len <= max_num_neighbor) {
+ for (size_t i = 0; i < ver_len; ++i) {
+ out_ver->push_back(col_list[i]);
+ out_edge->push_back(val_list[i]);
}
return;
}
// If we just sample a small number of elements from a large neighbor list.
std::vector sorted_idxs;
- if (ver_list.size() > max_num_neighbor * 2) {
+ if (ver_len > max_num_neighbor * 2) {
sorted_idxs.reserve(max_num_neighbor);
- RandomSample(ver_list.size(), max_num_neighbor, &sorted_idxs, seed);
+ RandomSample(ver_len, max_num_neighbor, &sorted_idxs, seed);
std::sort(sorted_idxs.begin(), sorted_idxs.end());
} else {
std::vector negate;
- negate.reserve(ver_list.size() - max_num_neighbor);
- RandomSample(ver_list.size(), ver_list.size() - max_num_neighbor,
+ negate.reserve(ver_len - max_num_neighbor);
+ RandomSample(ver_len, ver_len - max_num_neighbor,
&negate, seed);
std::sort(negate.begin(), negate.end());
- NegateSet(negate, ver_list.size(), &sorted_idxs);
+ NegateSet(negate, ver_len, &sorted_idxs);
}
// verify the result.
CHECK_EQ(sorted_idxs.size(), max_num_neighbor);
@@ -499,8 +484,8 @@ static void GetUniformSample(const std::vector& ver_list,
CHECK_GT(sorted_idxs[i], sorted_idxs[i - 1]);
}
for (auto idx : sorted_idxs) {
- out_ver->push_back(ver_list[idx]);
- out_edge->push_back(edge_list[idx]);
+ out_ver->push_back(col_list[idx]);
+ out_edge->push_back(val_list[idx]);
}
}
@@ -508,26 +493,26 @@ static void GetUniformSample(const std::vector& ver_list,
* Non-uniform sample via ArrayHeap
*/
static void GetNonUniformSample(const float* probability,
- const std::vector& ver_list,
- const std::vector& edge_list,
+ const dgl_id_t* val_list,
+ const dgl_id_t* col_list,
+ const size_t ver_len,
const size_t max_num_neighbor,
std::vector* out_ver,
std::vector* out_edge,
unsigned int* seed) {
- CHECK_EQ(ver_list.size(), edge_list.size());
// Copy ver_list to output
- if (ver_list.size() <= max_num_neighbor) {
- for (size_t i = 0; i < ver_list.size(); ++i) {
- out_ver->push_back(ver_list[i]);
- out_edge->push_back(edge_list[i]);
+ if (ver_len <= max_num_neighbor) {
+ for (size_t i = 0; i < ver_len; ++i) {
+ out_ver->push_back(col_list[i]);
+ out_edge->push_back(val_list[i]);
}
return;
}
// Make sample
std::vector sp_index(max_num_neighbor);
- std::vector sp_prob(ver_list.size());
- for (size_t i = 0; i < ver_list.size(); ++i) {
- sp_prob[i] = probability[ver_list[i]];
+ std::vector sp_prob(ver_len);
+ for (size_t i = 0; i < ver_len; ++i) {
+ sp_prob[i] = probability[col_list[i]];
}
ArrayHeap arrayHeap(sp_prob);
arrayHeap.SampleWithoutReplacement(max_num_neighbor, &sp_index, seed);
@@ -535,21 +520,13 @@ static void GetNonUniformSample(const float* probability,
out_edge->resize(max_num_neighbor);
for (size_t i = 0; i < max_num_neighbor; ++i) {
size_t idx = sp_index[i];
- out_ver->at(i) = ver_list[idx];
- out_edge->at(i) = edge_list[idx];
+ out_ver->at(i) = col_list[idx];
+ out_edge->at(i) = val_list[idx];
}
sort(out_ver->begin(), out_ver->end());
sort(out_edge->begin(), out_edge->end());
}
-/*
- * This is used for BFS traversal
- */
-struct ver_node {
- dgl_id_t vertex_id;
- int level;
-};
-
/*
* Used for subgraph sampling
*/
@@ -571,9 +548,9 @@ static void SampleSubgraph(const NDArray &csr,
float* sub_prob,
const NDArray &sub_layer,
const float* probability,
- dgl_id_t num_hops,
- dgl_id_t num_neighbor,
- dgl_id_t max_num_vertices) {
+ int num_hops,
+ size_t num_neighbor,
+ size_t max_num_vertices) {
unsigned int time_seed = time(nullptr);
size_t num_seeds = seed_arr.shape().Size();
CHECK_GE(max_num_vertices, num_seeds);
@@ -586,123 +563,119 @@ static void SampleSubgraph(const NDArray &csr,
dgl_id_t* out_layer = sub_layer.data().dptr();
// BFS traverse the graph and sample vertices
- dgl_id_t sub_vertices_count = 0;
//
- std::unordered_map sub_ver_mp;
- std::queue node_queue;
+ std::unordered_set sub_ver_mp;
+ std::vector > sub_vers;
+ sub_vers.reserve(num_seeds * 10);
// add seed vertices
for (size_t i = 0; i < num_seeds; ++i) {
- ver_node node;
- node.vertex_id = seed[i];
- node.level = 0;
- node_queue.push(node);
+ auto ret = sub_ver_mp.insert(seed[i]);
+ // If the vertex is inserted successfully.
+ if (ret.second) {
+ sub_vers.emplace_back(seed[i], 0);
+ }
}
- std::vector tmp_src_list;
- std::vector tmp_edge_list;
std::vector tmp_sampled_src_list;
std::vector tmp_sampled_edge_list;
- std::unordered_map neigh_mp;
+ // ver_id, position
+ std::vector > neigh_pos;
+ neigh_pos.reserve(num_seeds);
+ std::vector neighbor_list;
size_t num_edges = 0;
- while (!node_queue.empty() &&
- sub_vertices_count <= max_num_vertices ) {
- ver_node& cur_node = node_queue.front();
- dgl_id_t dst_id = cur_node.vertex_id;
- if (cur_node.level < num_hops) {
- auto ret = sub_ver_mp.find(dst_id);
- if (ret != sub_ver_mp.end()) {
- node_queue.pop();
- continue;
- }
- tmp_src_list.clear();
- tmp_edge_list.clear();
- tmp_sampled_src_list.clear();
- tmp_sampled_edge_list.clear();
- GetSrcList(val_list,
- col_list,
- indptr,
- dst_id,
- &tmp_src_list,
- &tmp_edge_list);
- if (probability == nullptr) { // uniform-sample
- GetUniformSample(tmp_src_list,
- tmp_edge_list,
+
+ // sub_vers is used both as a node collection and a queue.
+ // In the while loop, we iterate over sub_vers and new nodes are added to the vector.
+ // A vertex in the vector only needs to be accessed once. If there is a vertex behind idx
+ // isn't in the last level, we will sample its neighbors. If not, the while loop terminates.
+ size_t idx = 0;
+ while (idx < sub_vers.size() &&
+ sub_ver_mp.size() < max_num_vertices) {
+ dgl_id_t dst_id = sub_vers[idx].first;
+ int cur_node_level = sub_vers[idx].second;
+ idx++;
+ // If the node is in the last level, we don't need to sample neighbors
+ // from this node.
+ if (cur_node_level >= num_hops)
+ continue;
+
+ tmp_sampled_src_list.clear();
+ tmp_sampled_edge_list.clear();
+ dgl_id_t ver_len = *(indptr+dst_id+1) - *(indptr+dst_id);
+ if (probability == nullptr) { // uniform-sample
+ GetUniformSample(val_list + *(indptr + dst_id),
+ col_list + *(indptr + dst_id),
+ ver_len,
num_neighbor,
&tmp_sampled_src_list,
&tmp_sampled_edge_list,
&time_seed);
- } else { // non-uniform-sample
- GetNonUniformSample(probability,
- tmp_src_list,
- tmp_edge_list,
+ } else { // non-uniform-sample
+ GetNonUniformSample(probability,
+ val_list + *(indptr + dst_id),
+ col_list + *(indptr + dst_id),
+ ver_len,
num_neighbor,
&tmp_sampled_src_list,
&tmp_sampled_edge_list,
&time_seed);
- }
- neigh_mp.insert(std::pair(dst_id,
- neigh_list(tmp_sampled_src_list,
- tmp_sampled_edge_list)));
- num_edges += tmp_sampled_src_list.size();
- sub_ver_mp[cur_node.vertex_id] = cur_node.level;
- for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
- auto ret = sub_ver_mp.find(tmp_sampled_src_list[i]);
- if (ret == sub_ver_mp.end()) {
- ver_node new_node;
- new_node.vertex_id = tmp_sampled_src_list[i];
- new_node.level = cur_node.level + 1;
- node_queue.push(new_node);
- }
- }
- } else { // vertex without any neighbor
- auto ret = sub_ver_mp.find(dst_id);
- if (ret != sub_ver_mp.end()) {
- node_queue.pop();
- continue;
- }
- tmp_sampled_src_list.clear();
- tmp_sampled_edge_list.clear();
- neigh_mp.insert(std::pair(dst_id,
- neigh_list(tmp_sampled_src_list, // empty vector
- tmp_sampled_edge_list))); // empty vector
- sub_ver_mp[cur_node.vertex_id] = cur_node.level;
}
- sub_vertices_count++;
- node_queue.pop();
+ CHECK_EQ(tmp_sampled_src_list.size(), tmp_sampled_edge_list.size());
+ size_t pos = neighbor_list.size();
+ neigh_pos.emplace_back(dst_id, pos);
+ // First we push the size of neighbor vector
+ neighbor_list.push_back(tmp_sampled_edge_list.size());
+ // Then push the vertices
+ for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
+ neighbor_list.push_back(tmp_sampled_src_list[i]);
+ }
+ // Finally we push the edge list
+ for (size_t i = 0; i < tmp_sampled_edge_list.size(); ++i) {
+ neighbor_list.push_back(tmp_sampled_edge_list[i]);
+ }
+ num_edges += tmp_sampled_src_list.size();
+ for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
+ // If we have sampled the max number of vertices, we have to stop.
+ if (sub_ver_mp.size() >= max_num_vertices)
+ break;
+ // We need to add the neighbor in the hashtable here. This ensures that
+ // the vertex in the queue is unique. If we see a vertex before, we don't
+ // need to add it to the queue again.
+ auto ret = sub_ver_mp.insert(tmp_sampled_src_list[i]);
+ // If the sampled neighbor is inserted to the map successfully.
+ if (ret.second)
+ sub_vers.emplace_back(tmp_sampled_src_list[i], cur_node_level + 1);
+ }
+ }
+ // Let's check if there is a vertex that we haven't sampled its neighbors.
+ for (; idx < sub_vers.size(); idx++) {
+ if (sub_vers[idx].second < num_hops) {
+ LOG(WARNING)
+ << "The sampling is truncated because we have reached the max number of vertices\n"
+ << "Please use a smaller number of seeds or a small neighborhood";
+ break;
+ }
}
// Copy sub_ver_mp to output[0]
- size_t idx = 0;
- for (auto& data : sub_ver_mp) {
- *(out+idx) = data.first;
- idx++;
- }
+ // Copy layer
size_t num_vertices = sub_ver_mp.size();
- std::sort(out, out + num_vertices);
- // The rest data will be set to -1
- for (dgl_id_t i = idx; i < max_num_vertices; ++i) {
- *(out+i) = -1;
+ std::sort(sub_vers.begin(), sub_vers.end(),
+ [](const std::pair &a1, const std::pair &a2) {
+ return a1.first < a2.first;
+ });
+ for (size_t i = 0; i < sub_vers.size(); i++) {
+ out[i] = sub_vers[i].first;
+ out_layer[i] = sub_vers[i].second;
}
// The last element stores the actual
// number of vertices in the subgraph.
out[max_num_vertices] = sub_ver_mp.size();
+
// Copy sub_probability
if (sub_prob != nullptr) {
- for (dgl_id_t i = 0; i < max_num_vertices; ++i) {
+ for (size_t i = 0; i < sub_ver_mp.size(); ++i) {
dgl_id_t idx = out[i];
- if (idx != -1) {
- sub_prob[i] = probability[idx];
- } else {
- sub_prob[i] = -1;
- }
- }
- }
- // Copy layer
- for (dgl_id_t i = 0; i < max_num_vertices; ++i) {
- dgl_id_t idx = out[i];
- if (idx != -1) {
- out_layer[i] = sub_ver_mp[idx];
- } else {
- out_layer[i] = -1;
+ sub_prob[i] = probability[idx];
}
}
// Construct sub_csr_graph
@@ -718,20 +691,37 @@ static void SampleSubgraph(const NDArray &csr,
dgl_id_t* indptr_out = sub_csr.aux_data(0).dptr();
indptr_out[0] = 0;
size_t collected_nedges = 0;
+
+ // Both the out array and neigh_pos are sorted. By scanning the two arrays, we can see
+ // which vertices have neighbors and which don't.
+ std::sort(neigh_pos.begin(), neigh_pos.end(),
+ [](const std::pair &a1, const std::pair &a2) {
+ return a1.first < a2.first;
+ });
+ size_t idx_with_neigh = 0;
for (size_t i = 0; i < num_vertices; i++) {
dgl_id_t dst_id = *(out + i);
- auto it = neigh_mp.find(dst_id);
- const auto &edges = it->second.edges;
- const auto &neighs = it->second.neighs;
- CHECK_EQ(edges.size(), neighs.size());
- if (!edges.empty()) {
- std::copy(edges.begin(), edges.end(), val_list_out + collected_nedges);
- std::copy(neighs.begin(), neighs.end(), col_list_out + collected_nedges);
- collected_nedges += edges.size();
+ // If a vertex is in sub_ver_mp but not in neigh_pos, this vertex must not
+ // have edges.
+ size_t edge_size = 0;
+ if (idx_with_neigh < neigh_pos.size() && dst_id == neigh_pos[idx_with_neigh].first) {
+ size_t pos = neigh_pos[idx_with_neigh].second;
+ CHECK_LT(pos, neighbor_list.size());
+ edge_size = neighbor_list[pos];
+ CHECK_LE(pos + edge_size * 2 + 1, neighbor_list.size());
+
+ std::copy_n(neighbor_list.begin() + pos + 1,
+ edge_size,
+ col_list_out + collected_nedges);
+ std::copy_n(neighbor_list.begin() + pos + edge_size + 1,
+ edge_size,
+ val_list_out + collected_nedges);
+ collected_nedges += edge_size;
+ idx_with_neigh++;
}
- indptr_out[i+1] = indptr_out[i] + edges.size();
+ indptr_out[i+1] = indptr_out[i] + edge_size;
}
- for (dgl_id_t i = num_vertices+1; i <= max_num_vertices; ++i) {
+ for (size_t i = num_vertices+1; i <= max_num_vertices; ++i) {
indptr_out[i] = indptr_out[i-1];
}
}
@@ -766,8 +756,16 @@ static void CSRNeighborUniformSampleComputeExCPU(const nnvm::NodeAttrs& attrs,
}
NNVM_REGISTER_OP(_contrib_dgl_csr_neighbor_uniform_sample)
-.describe(R"code(This operator samples sub-graph from a csr graph via an
-uniform probability.
+.describe(R"code(This operator samples sub-graphs from a csr graph via an
+uniform probability. The operator is designed for DGL.
+
+The operator outputs three sets of NDArrays to represent the sampled results
+(the number of NDArrays in each set is the same as the number of seed NDArrays):
+1) a set of 1D NDArrays containing the sampled vertices, 2) a set of CSRNDArrays representing
+the sampled edges, 3) a set of 1D NDArrays indicating the layer where a vertex is sampled.
+The first set of 1D NDArrays has a length of max_num_vertices+1. The last element in an NDArray
+indicates the actual number of vertices in a subgraph. The third set of NDArrays has a length
+of max_num_vertices, and the valid number of vertices is the same as the ones in the first set.
Example:
@@ -853,7 +851,16 @@ static void CSRNeighborNonUniformSampleComputeExCPU(const nnvm::NodeAttrs& attrs
NNVM_REGISTER_OP(_contrib_dgl_csr_neighbor_non_uniform_sample)
.describe(R"code(This operator samples sub-graph from a csr graph via an
-uniform probability.
+non-uniform probability. The operator is designed for DGL.
+
+The operator outputs four sets of NDArrays to represent the sampled results
+(the number of NDArrays in each set is the same as the number of seed NDArrays):
+1) a set of 1D NDArrays containing the sampled vertices, 2) a set of CSRNDArrays representing
+the sampled edges, 3) a set of 1D NDArrays with the probability that vertices are sampled,
+4) a set of 1D NDArrays indicating the layer where a vertex is sampled.
+The first set of 1D NDArrays has a length of max_num_vertices+1. The last element in an NDArray
+indicates the actual number of vertices in a subgraph. The third and fourth sets of NDArrays have a length
+of max_num_vertices, and the valid number of vertices is the same as the ones in the first set.
Example:
diff --git a/src/operator/contrib/multibox_detection.cc b/src/operator/contrib/multibox_detection.cc
index c005dfa06590..b4f66d8fcf1d 100644
--- a/src/operator/contrib/multibox_detection.cc
+++ b/src/operator/contrib/multibox_detection.cc
@@ -174,7 +174,6 @@ inline void MultiBoxDetectionForward(const Tensor &out,
}
// apply nms
-#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < nkeep; ++i) {
int offset_i = i * 6;
if (p_out[offset_i] < 0) continue; // skip eliminated
diff --git a/src/operator/contrib/nnvm_to_onnx-inl.h b/src/operator/contrib/nnvm_to_onnx-inl.h
index 58f88b051433..011ffe6b7ddb 100644
--- a/src/operator/contrib/nnvm_to_onnx-inl.h
+++ b/src/operator/contrib/nnvm_to_onnx-inl.h
@@ -37,7 +37,6 @@
#include
#include
-#include
#include
#include
@@ -49,13 +48,48 @@
#include
#include
-#include "./tensorrt-inl.h"
#include "../operator_common.h"
#include "../../common/utils.h"
#include "../../common/serialization.h"
namespace mxnet {
namespace op {
+
+namespace nnvm_to_onnx {
+ enum class TypeIO { Inputs = 0, Outputs = 1 };
+ using NameToIdx_t = std::map;
+ using InferenceTuple_t = std::tuple;
+ using InferenceMap_t = std::map;
+} // namespace nnvm_to_onnx
+
+struct ONNXParam : public dmlc::Parameter {
+ std::string serialized_onnx_graph;
+ std::string serialized_input_map;
+ std::string serialized_output_map;
+ nnvm_to_onnx::NameToIdx_t input_map;
+ nnvm_to_onnx::InferenceMap_t output_map;
+ ::onnx::ModelProto onnx_pb_graph;
+
+ ONNXParam() {}
+
+ ONNXParam(const ::onnx::ModelProto& onnx_graph,
+ const nnvm_to_onnx::InferenceMap_t& input_map,
+ const nnvm_to_onnx::NameToIdx_t& output_map) {
+ common::Serialize(input_map, &serialized_input_map);
+ common::Serialize(output_map, &serialized_output_map);
+ onnx_graph.SerializeToString(&serialized_onnx_graph);
+ }
+
+DMLC_DECLARE_PARAMETER(ONNXParam) {
+ DMLC_DECLARE_FIELD(serialized_onnx_graph)
+ .describe("Serialized ONNX graph");
+ DMLC_DECLARE_FIELD(serialized_input_map)
+ .describe("Map from inputs to topological order as input.");
+ DMLC_DECLARE_FIELD(serialized_output_map)
+ .describe("Map from outputs to order in g.outputs.");
+ }
+};
+
namespace nnvm_to_onnx {
using namespace nnvm;
@@ -76,7 +110,7 @@ void ConvertConstant(GraphProto* const graph_proto,
const std::string& node_name,
std::unordered_map* const shared_buffer);
-void ConvertOutput(op::tensorrt::InferenceMap_t* const trt_output_map,
+void ConvertOutput(op::nnvm_to_onnx::InferenceMap_t* const trt_output_map,
GraphProto* const graph_proto,
const std::unordered_map::iterator& out_iter,
const std::string& node_name,
@@ -133,7 +167,7 @@ void ConvertElementwiseAdd(NodeProto *node_proto,
const nnvm::IndexedGraph &ig,
const array_view &inputs);
-TRTParam ConvertNnvmGraphToOnnx(
+ONNXParam ConvertNnvmGraphToOnnx(
const nnvm::Graph &g,
std::unordered_map *const shared_buffer);
diff --git a/src/operator/contrib/nnvm_to_onnx.cc b/src/operator/contrib/nnvm_to_onnx.cc
index 902466614c7c..784384e94e1e 100644
--- a/src/operator/contrib/nnvm_to_onnx.cc
+++ b/src/operator/contrib/nnvm_to_onnx.cc
@@ -47,7 +47,6 @@
#include "../../operator/nn/fully_connected-inl.h"
#include "../../operator/nn/pooling-inl.h"
#include "../../operator/softmax_output-inl.h"
-#include "./tensorrt-inl.h"
#if MXNET_USE_TENSORRT_ONNX_CHECKER
#include
@@ -55,14 +54,17 @@
namespace mxnet {
namespace op {
+
+DMLC_REGISTER_PARAMETER(ONNXParam);
+
namespace nnvm_to_onnx {
-op::TRTParam ConvertNnvmGraphToOnnx(
+op::ONNXParam ConvertNnvmGraphToOnnx(
const nnvm::Graph& g,
std::unordered_map* const shared_buffer) {
- op::TRTParam trt_param;
- op::tensorrt::NameToIdx_t trt_input_map;
- op::tensorrt::InferenceMap_t trt_output_map;
+ op::ONNXParam onnx_param;
+ op::nnvm_to_onnx::NameToIdx_t onnx_input_map;
+ op::nnvm_to_onnx::InferenceMap_t onnx_output_map;
const nnvm::IndexedGraph& ig = g.indexed_graph();
const auto& storage_types = g.GetAttr("storage_type");
@@ -105,7 +107,7 @@ op::TRTParam ConvertNnvmGraphToOnnx(
current_input++;
continue;
}
- trt_input_map.emplace(node_name, current_input++);
+ onnx_input_map.emplace(node_name, current_input++);
ConvertPlaceholder(node_name, placeholder_shapes, graph_proto);
} else {
// If it's not a placeholder, then by exclusion it's a constant.
@@ -140,23 +142,23 @@ op::TRTParam ConvertNnvmGraphToOnnx(
auto out_iter = output_lookup.find(node_name);
// We found an output
if (out_iter != output_lookup.end()) {
- ConvertOutput(&trt_output_map, graph_proto, out_iter, node_name, g,
+ ConvertOutput(&onnx_output_map, graph_proto, out_iter, node_name, g,
storage_types, dtypes);
} // output found
} // conversion function exists
} // loop over i from 0 to num_nodes
- model_proto.SerializeToString(&trt_param.serialized_onnx_graph);
- common::Serialize(trt_input_map,
- &trt_param.serialized_input_map);
- common::Serialize(trt_output_map,
- &trt_param.serialized_output_map);
+ model_proto.SerializeToString(&onnx_param.serialized_onnx_graph);
+ common::Serialize(onnx_input_map,
+ &onnx_param.serialized_input_map);
+ common::Serialize(onnx_output_map,
+ &onnx_param.serialized_output_map);
#if MXNET_USE_TENSORRT_ONNX_CHECKER
onnx::checker::check_model(model_proto);
#endif // MXNET_USE_TENSORRT_ONNX_CHECKER
- return trt_param;
+ return onnx_param;
}
void ConvertConvolution(NodeProto* node_proto, const NodeAttrs& attrs,
@@ -489,7 +491,7 @@ void ConvertConstant(
}
void ConvertOutput(
- op::tensorrt::InferenceMap_t* const trt_output_map,
+ op::nnvm_to_onnx::InferenceMap_t* const output_map,
GraphProto* const graph_proto,
const std::unordered_map::iterator& out_iter,
const std::string& node_name, const nnvm::Graph& g,
@@ -501,10 +503,10 @@ void ConvertOutput(
int dtype = dtypes[out_idx];
// This should work with fp16 as well
- op::tensorrt::InferenceTuple_t out_tuple{out_iter->second, out_shape, storage_type,
+ op::nnvm_to_onnx::InferenceTuple_t out_tuple{out_iter->second, out_shape, storage_type,
dtype};
- trt_output_map->emplace(node_name, out_tuple);
+ output_map->emplace(node_name, out_tuple);
auto graph_out = graph_proto->add_output();
auto tensor_type = graph_out->mutable_type()->mutable_tensor_type();
diff --git a/src/operator/contrib/roi_align-inl.h b/src/operator/contrib/roi_align-inl.h
index 263f72a6abc0..9f4d7ce48827 100644
--- a/src/operator/contrib/roi_align-inl.h
+++ b/src/operator/contrib/roi_align-inl.h
@@ -20,7 +20,7 @@
* Copyright (c) 2018 by Contributors
* \file roi_align-inl.h
* \brief roi align operator and symbol
- * \author Hang Zhang
+ * \author Hang Zhang, Shesung
* modified from Caffe2
*/
#ifndef MXNET_OPERATOR_CONTRIB_ROI_ALIGN_INL_H_
@@ -35,7 +35,6 @@
namespace mxnet {
namespace op {
-
// Declare enumeration of input order to make code more intuitive.
// These enums are only visible within this header
namespace roialign {
@@ -48,6 +47,7 @@ struct ROIAlignParam : public dmlc::Parameter {
TShape pooled_size;
float spatial_scale;
int sample_ratio;
+ bool position_sensitive;
DMLC_DECLARE_PARAMETER(ROIAlignParam) {
DMLC_DECLARE_FIELD(pooled_size)
.set_expect_ndim(2).enforce_nonzero()
@@ -57,6 +57,10 @@ struct ROIAlignParam : public dmlc::Parameter {
"Equals the reciprocal of total stride in convolutional layers");
DMLC_DECLARE_FIELD(sample_ratio).set_default(-1)
.describe("Optional sampling ratio of ROI align, using adaptive size by default.");
+ DMLC_DECLARE_FIELD(position_sensitive).set_default(false)
+ .describe("Whether to perform position-sensitive RoI pooling. PSRoIPooling is "
+      "first proposed by R-FCN and it can reduce the input channels by ph*pw times, "
+ "where (ph, pw) is the pooled_size");
}
};
diff --git a/src/operator/contrib/roi_align.cc b/src/operator/contrib/roi_align.cc
index 76675677fa08..e584ea30325d 100644
--- a/src/operator/contrib/roi_align.cc
+++ b/src/operator/contrib/roi_align.cc
@@ -20,7 +20,7 @@
* Copyright (c) 2018 by Contributors
* \file roi_align.cc
* \brief roi align operator
- * \author Hang Zhang
+ * \author Hang Zhang, Shesung
* Adapted from Caffe2
*/
#include "./roi_align-inl.h"
@@ -142,6 +142,7 @@ void ROIAlignForward(
const int nthreads,
const T* bottom_data,
const T& spatial_scale,
+ const bool position_sensitive,
const int channels,
const int height,
const int width,
@@ -156,6 +157,8 @@ void ROIAlignForward(
int n_rois = nthreads / channels / pooled_width / pooled_height;
// (n, c, ph, pw) is an element in the pooled output
// can be parallelized using omp
+#pragma omp parallel for \
+num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int n = 0; n < n_rois; n++) {
int index_n = n * channels * pooled_width * pooled_height;
@@ -208,19 +211,23 @@ void ROIAlignForward(
roi_bin_grid_w,
&pre_calc);
- int c;
-#pragma omp parallel for private(c) \
-num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
- for (c = 0; c < channels; c++) {
+ for (int c = 0; c < channels; c++) {
int index_n_c = index_n + c * pooled_width * pooled_height;
- const T* offset_bottom_data =
- bottom_data + (roi_batch_ind * channels + c) * height * width;
int pre_calc_index = 0;
for (int ph = 0; ph < pooled_height; ph++) {
for (int pw = 0; pw < pooled_width; pw++) {
int index = index_n_c + ph * pooled_width + pw;
+ int c_unpooled = c;
+ int channels_unpooled = channels;
+ if (position_sensitive) {
+ c_unpooled = c * pooled_height * pooled_width + ph * pooled_width + pw;
+ channels_unpooled = channels * pooled_height * pooled_width;
+ }
+ const T* offset_bottom_data =
+ bottom_data + (roi_batch_ind * channels_unpooled + c_unpooled)
+ * height * width;
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
@@ -310,6 +317,7 @@ void ROIAlignBackward(
const T* top_diff,
const int /*num_rois*/,
const T& spatial_scale,
+ const bool position_sensitive,
const int channels,
const int height,
const int width,
@@ -347,8 +355,15 @@ void ROIAlignBackward(
T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+ int c_unpooled = c;
+ int channels_unpooled = channels;
+ if (position_sensitive) {
+ c_unpooled = c * pooled_height * pooled_width + ph * pooled_width + pw;
+ channels_unpooled = channels * pooled_height * pooled_width;
+ }
T* offset_bottom_diff =
- bottom_diff + (roi_batch_ind * channels + c) * height * width;
+ bottom_diff + (roi_batch_ind * channels_unpooled + c_unpooled)
+ * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
@@ -426,7 +441,7 @@ void ROIAlignForwardCompute(const nnvm::NodeAttrs& attrs,
const int count = out_data[roialign::kOut].Size();
// const int num_rois = in_data[roialign::kBox].size(0);
- const int channels = in_data[roialign::kData].size(1);
+ const int channels = out_data[roialign::kOut].size(1); // channels of pooled output
const int height = in_data[roialign::kData].size(2);
const int width = in_data[roialign::kData].size(3);
const int pooled_height = out_data[roialign::kOut].size(2);
@@ -439,9 +454,9 @@ void ROIAlignForwardCompute(const nnvm::NodeAttrs& attrs,
const DType *bottom_rois = in_data[roialign::kBox].dptr();
DType *top_data = out_data[roialign::kOut].dptr();
- ROIAlignForward(count, bottom_data, param.spatial_scale, channels,
- height, width, pooled_height, pooled_width, param.sample_ratio,
- bottom_rois, rois_cols, top_data);
+ ROIAlignForward(count, bottom_data, param.spatial_scale, param.position_sensitive,
+ channels, height, width, pooled_height, pooled_width,
+ param.sample_ratio, bottom_rois, rois_cols, top_data);
})
}
@@ -470,7 +485,7 @@ void ROIAlignBackwardCompute(const nnvm::NodeAttrs& attrs,
const int count = out_grad[0].Size();
const int num_rois = in_data[0].size(0);
- const int channels = outputs[0].size(1);
+ const int channels = out_grad[0].size(1); // channels of pooled output
const int height = outputs[0].size(2);
const int width = outputs[0].size(3);
const int pooled_height = out_grad[0].size(2);
@@ -489,8 +504,9 @@ void ROIAlignBackwardCompute(const nnvm::NodeAttrs& attrs,
Fill(s, outputs[0], kWriteTo, static_cast(0));
}
ROIAlignBackward(count, top_diff, num_rois, param.spatial_scale,
- channels, height, width, pooled_height, pooled_width,
- param.sample_ratio, grad_in, bottom_rois, rois_cols);
+ param.position_sensitive, channels, height, width,
+ pooled_height, pooled_width, param.sample_ratio, grad_in,
+ bottom_rois, rois_cols);
}
if (kWriteTo == req[roialign::kBox]) {
Fill(s, outputs[1], kWriteTo, static_cast(0));
@@ -545,8 +561,17 @@ He, Kaiming, et al. "Mask R-CNN." ICCV, 2017
CHECK_EQ(bshape[1], 5) << "bbox should be a 2D tensor of shape [batch, 5]";
// out: [num_rois, c, pooled_h, pooled_w]
out_shape->clear();
- out_shape->push_back(
- Shape4(bshape[0], dshape[1], param.pooled_size[0], param.pooled_size[1]));
+ if (param.position_sensitive) {
+ CHECK_EQ(dshape[1] % (param.pooled_size[0]*param.pooled_size[1]), 0) <<
+ "Input channels should be divided by pooled_size[0]*pooled_size[1]"
+ "when position_sensitive is true.";
+ out_shape->push_back(
+ Shape4(bshape[0], dshape[1]/param.pooled_size[0]/param.pooled_size[1],
+ param.pooled_size[0], param.pooled_size[1]));
+ } else {
+ out_shape->push_back(
+ Shape4(bshape[0], dshape[1], param.pooled_size[0], param.pooled_size[1]));
+ }
return true;
})
.set_attr("FInferType", [](const nnvm::NodeAttrs& attrs,
diff --git a/src/operator/contrib/roi_align.cu b/src/operator/contrib/roi_align.cu
index d3db70b73b1a..38b461d5f58c 100644
--- a/src/operator/contrib/roi_align.cu
+++ b/src/operator/contrib/roi_align.cu
@@ -20,7 +20,7 @@
* Copyright (c) 2018 by Contributors
* \file roi_align.cu
* \brief roi align operator
- * \author Hang Zhang
+ * \author Hang Zhang, Shesung
* Adapted from Caffe2
*/
#include "./roi_align-inl.h"
@@ -111,6 +111,7 @@ __global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
+ const bool position_sensitive,
const int channels,
const int height,
const int width,
@@ -145,8 +146,15 @@ __global__ void RoIAlignForwardKernel(
T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+ int c_unpooled = c;
+ int channels_unpooled = channels;
+ if (position_sensitive) {
+ c_unpooled = c * pooled_height * pooled_width + ph * pooled_width + pw;
+ channels_unpooled = channels * pooled_height * pooled_width;
+ }
const T* offset_bottom_data =
- bottom_data + (roi_batch_ind * channels + c) * height * width;
+ bottom_data + (roi_batch_ind * channels_unpooled + c_unpooled)
+ * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
@@ -242,6 +250,7 @@ __global__ void RoIAlignBackwardKernel(
const T* top_diff,
const int num_rois,
const T spatial_scale,
+ const bool position_sensitive,
const int channels,
const int height,
const int width,
@@ -276,8 +285,15 @@ __global__ void RoIAlignBackwardKernel(
T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+ int c_unpooled = c;
+ int channels_unpooled = channels;
+ if (position_sensitive) {
+ c_unpooled = c * pooled_height * pooled_width + ph * pooled_width + pw;
+ channels_unpooled = channels * pooled_height * pooled_width;
+ }
T* offset_bottom_diff =
- bottom_diff + (roi_batch_ind * channels + c) * height * width;
+ bottom_diff + (roi_batch_ind * channels_unpooled + c_unpooled)
+ * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
@@ -357,7 +373,7 @@ void ROIAlignForwardCompute(const nnvm::NodeAttrs& attrs,
const int count = out_data[roialign::kOut].Size();
const int num_rois = in_data[roialign::kBox].size(0);
- const int channels = in_data[roialign::kData].size(1);
+ const int channels = out_data[roialign::kOut].size(1); // channels of pooled output
const int height = in_data[roialign::kData].size(2);
const int width = in_data[roialign::kData].size(3);
const int pooled_height = out_data[roialign::kOut].size(2);
@@ -377,6 +393,7 @@ void ROIAlignForwardCompute(const nnvm::NodeAttrs& attrs,
count,
bottom_data,
param.spatial_scale,
+ param.position_sensitive,
channels,
height,
width,
@@ -414,7 +431,7 @@ void ROIAlignBackwardCompute(const nnvm::NodeAttrs& attrs,
const int count = out_grad[0].Size();
const int num_rois = in_data[0].size(0);
- const int channels = outputs[0].size(1);
+ const int channels = out_grad[0].size(1); // channels of pooled output
const int height = outputs[0].size(2);
const int width = outputs[0].size(3);
const int pooled_height = out_grad[0].size(2);
@@ -445,6 +462,7 @@ void ROIAlignBackwardCompute(const nnvm::NodeAttrs& attrs,
top_diff,
num_rois,
param.spatial_scale,
+ param.position_sensitive,
channels,
height,
width,
diff --git a/src/operator/contrib/tensorrt-inl.h b/src/operator/contrib/tensorrt-inl.h
index be335ab1208f..062d22e35795 100644
--- a/src/operator/contrib/tensorrt-inl.h
+++ b/src/operator/contrib/tensorrt-inl.h
@@ -38,7 +38,6 @@
#include
#include
-#include
#include
#include
@@ -49,6 +48,7 @@
#include
#include
+#include "nnvm_to_onnx-inl.h"
#include "../operator_common.h"
#include "../../common/utils.h"
#include "../../common/serialization.h"
@@ -60,49 +60,15 @@ namespace mxnet {
namespace op {
using namespace nnvm;
-using namespace ::onnx;
using int64 = ::google::protobuf::int64;
-namespace tensorrt {
- enum class TypeIO { Inputs = 0, Outputs = 1 };
- using NameToIdx_t = std::map;
- using InferenceTuple_t = std::tuple;
- using InferenceMap_t = std::map;
-} // namespace tensorrt
using trt_name_to_idx = std::map;
-struct TRTParam : public dmlc::Parameter {
- std::string serialized_onnx_graph;
- std::string serialized_input_map;
- std::string serialized_output_map;
- tensorrt::NameToIdx_t input_map;
- tensorrt::InferenceMap_t output_map;
- ::onnx::ModelProto onnx_pb_graph;
-
- TRTParam() {}
-
- TRTParam(const ::onnx::ModelProto& onnx_graph,
- const tensorrt::InferenceMap_t& input_map,
- const tensorrt::NameToIdx_t& output_map) {
- common::Serialize(input_map, &serialized_input_map);
- common::Serialize(output_map, &serialized_output_map);
- onnx_graph.SerializeToString(&serialized_onnx_graph);
- }
-
-DMLC_DECLARE_PARAMETER(TRTParam) {
- DMLC_DECLARE_FIELD(serialized_onnx_graph)
- .describe("Serialized ONNX graph");
- DMLC_DECLARE_FIELD(serialized_input_map)
- .describe("Map from inputs to topological order as input.");
- DMLC_DECLARE_FIELD(serialized_output_map)
- .describe("Map from outputs to order in g.outputs.");
- }
-};
struct TRTEngineParam {
nvinfer1::IExecutionContext* trt_executor;
- std::vector > binding_map;
+ std::vector > binding_map;
};
} // namespace op
diff --git a/src/operator/contrib/tensorrt.cc b/src/operator/contrib/tensorrt.cc
index 619fe1e2b8f4..88a65fba3ea3 100644
--- a/src/operator/contrib/tensorrt.cc
+++ b/src/operator/contrib/tensorrt.cc
@@ -44,20 +44,18 @@
namespace mxnet {
namespace op {
-DMLC_REGISTER_PARAMETER(TRTParam);
-
OpStatePtr GetPtrMapping(nvinfer1::ICudaEngine* trt_engine,
- tensorrt::NameToIdx_t input_map,
- tensorrt::NameToIdx_t output_map) {
+ nnvm_to_onnx::NameToIdx_t input_map,
+ nnvm_to_onnx::NameToIdx_t output_map) {
TRTEngineParam param;
for (int b = 0; b < trt_engine->getNbBindings(); ++b) {
const std::string& binding_name = trt_engine->getBindingName(b);
if (trt_engine->bindingIsInput(b)) {
param.binding_map.emplace_back(input_map[binding_name],
- tensorrt::TypeIO::Inputs);
+ nnvm_to_onnx::TypeIO::Inputs);
} else {
param.binding_map.emplace_back(output_map[binding_name],
- tensorrt::TypeIO::Outputs);
+ nnvm_to_onnx::TypeIO::Outputs);
}
}
param.trt_executor = trt_engine->createExecutionContext();
@@ -67,7 +65,7 @@ OpStatePtr GetPtrMapping(nvinfer1::ICudaEngine* trt_engine,
OpStatePtr TRTCreateState(const nnvm::NodeAttrs& attrs, Context /*ctx*/,
const std::vector& /*ishape*/,
const std::vector& /*itype*/) {
- const auto& node_param = nnvm::get(attrs.parsed);
+ const auto& node_param = nnvm::get(attrs.parsed);
::onnx::ModelProto model_proto;
bool success = model_proto.ParseFromString(node_param.serialized_onnx_graph);
@@ -82,7 +80,7 @@ OpStatePtr TRTCreateState(const nnvm::NodeAttrs& attrs, Context /*ctx*/,
nvinfer1::ICudaEngine* const trt_engine = ::onnx_to_tensorrt::onnxToTrtCtx(
node_param.serialized_onnx_graph, batch_size, 1 << 30);
- tensorrt::NameToIdx_t output_map;
+ nnvm_to_onnx::NameToIdx_t output_map;
for (auto& el : node_param.output_map) {
output_map[el.first] = std::get<0>(el.second);
}
@@ -90,7 +88,7 @@ OpStatePtr TRTCreateState(const nnvm::NodeAttrs& attrs, Context /*ctx*/,
}
void TRTParamParser(nnvm::NodeAttrs* attrs) {
- TRTParam param_;
+ ONNXParam param_;
try {
param_.Init(attrs->dict);
@@ -114,7 +112,7 @@ void TRTParamParser(nnvm::NodeAttrs* attrs) {
inline bool TRTInferShape(const NodeAttrs& attrs, std::vector* /*in_shape*/,
std::vector* out_shape) {
- const auto &node_param = nnvm::get(attrs.parsed);
+ const auto &node_param = nnvm::get(attrs.parsed);
for (auto& el : node_param.output_map) {
(*out_shape)[std::get<0>(el.second)] = std::get<1>(el.second);
}
@@ -131,7 +129,7 @@ inline bool TRTInferStorageType(const NodeAttrs& /*attrs*/, const int /*dev_mask
inline bool TRTInferType(const NodeAttrs& attrs, std::vector* /*in_dtype*/,
std::vector* out_dtype) {
- const auto& node_param = nnvm::get(attrs.parsed);
+ const auto& node_param = nnvm::get(attrs.parsed);
for (auto& el : node_param.output_map) {
(*out_dtype)[std::get<0>(el.second)] = std::get<3>(el.second);
}
@@ -140,7 +138,7 @@ inline bool TRTInferType(const NodeAttrs& attrs, std::vector* /*in_dtype*/,
inline std::vector TRTListInputNames(const NodeAttrs& attrs) {
std::vector output;
- const auto& node_param = nnvm::get(attrs.parsed);
+ const auto& node_param = nnvm::get(attrs.parsed);
output.resize(node_param.input_map.size());
for (auto& el : node_param.input_map) {
output[el.second] = el.first;
@@ -150,7 +148,7 @@ inline std::vector TRTListInputNames(const NodeAttrs& attrs) {
inline std::vector TRTListOutputNames(const NodeAttrs& attrs) {
std::vector output;
- const auto& node_param = nnvm::get(attrs.parsed);
+ const auto& node_param = nnvm::get(attrs.parsed);
output.resize(node_param.output_map.size());
for (auto& el : node_param.output_map) {
output[std::get<0>(el.second)] = el.first;
@@ -162,11 +160,11 @@ NNVM_REGISTER_OP(_trt_op)
.describe(R"code(TRT operation (one engine)
)code" ADD_FILELINE)
.set_num_inputs([](const NodeAttrs& attrs) {
- const auto& node_param = nnvm::get(attrs.parsed);
+ const auto& node_param = nnvm::get(attrs.parsed);
return node_param.input_map.size();
})
.set_num_outputs([](const NodeAttrs& attrs) {
- const auto& node_param = nnvm::get(attrs.parsed);
+ const auto& node_param = nnvm::get(attrs.parsed);
return node_param.output_map.size();
})
.set_attr_parser(TRTParamParser)
diff --git a/src/operator/contrib/tensorrt.cu b/src/operator/contrib/tensorrt.cu
index 2fe8727b73e4..9a9c3c024366 100644
--- a/src/operator/contrib/tensorrt.cu
+++ b/src/operator/contrib/tensorrt.cu
@@ -52,7 +52,7 @@ void TRTCompute(const OpStatePtr& state, const OpContext& ctx,
std::vector bindings;
bindings.reserve(param.binding_map.size());
for (auto& p : param.binding_map) {
- if (p.second == tensorrt::TypeIO::Inputs) {
+ if (p.second == nnvm_to_onnx::TypeIO::Inputs) {
bindings.emplace_back(inputs[p.first].dptr_);
} else {
bindings.emplace_back(outputs[p.first].dptr_);
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 305eeab21176..fb920c31ce37 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -97,9 +97,10 @@ static void ActivationComputeExCPU(const nnvm::NodeAttrs& attrs,
const std::vector& inputs,
const std::vector