diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000..e534d4594
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+include requirements/*.txt
diff --git a/backend_ops/CMakeLists.txt b/backend_ops/CMakeLists.txt
index 2521bd3b7..028082fbe 100644
--- a/backend_ops/CMakeLists.txt
+++ b/backend_ops/CMakeLists.txt
@@ -1,4 +1,4 @@
-add_definitions(-std=c++11)
+set(CMAKE_CXX_STANDARD 11)
 set(CMAKE_CXX_FLAGS_RELEASE "-O3")
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
diff --git a/configs/_base_/backends/ncnn.py b/configs/_base_/backends/ncnn.py
index 691a534a9..e1f837770 100644
--- a/configs/_base_/backends/ncnn.py
+++ b/configs/_base_/backends/ncnn.py
@@ -1 +1 @@
-backend = 'ncnn'
+backend_config = dict(type='ncnn')
diff --git a/configs/_base_/backends/onnxruntime.py b/configs/_base_/backends/onnxruntime.py
index fe6f47113..f4cd8578c 100644
--- a/configs/_base_/backends/onnxruntime.py
+++ b/configs/_base_/backends/onnxruntime.py
@@ -1 +1 @@
-backend = 'onnxruntime'
+backend_config = dict(type='onnxruntime')
diff --git a/configs/_base_/backends/ppl.py b/configs/_base_/backends/ppl.py
index ad06670ba..7c19ce6ad 100644
--- a/configs/_base_/backends/ppl.py
+++ b/configs/_base_/backends/ppl.py
@@ -1 +1 @@
-backend = 'ppl'
+backend_config = dict(type='ppl')
diff --git a/configs/_base_/backends/tensorrt.py b/configs/_base_/backends/tensorrt.py
index a22ad92ba..d4257b18d 100644
--- a/configs/_base_/backends/tensorrt.py
+++ b/configs/_base_/backends/tensorrt.py
@@ -1,5 +1,6 @@
 import tensorrt as trt
 
-backend = 'tensorrt'
-tensorrt_params = dict(
-    shared_params=dict(fp16_mode=False, log_level=trt.Logger.INFO))
+backend_config = dict(
+    type='tensorrt',
+    common_config=dict(
+        fp16_mode=False, log_level=trt.Logger.INFO, max_workspace_size=0))
diff --git a/configs/_base_/backends/tensorrt_int8.py b/configs/_base_/backends/tensorrt_int8.py
index 8f2b3b8f9..4a609af86 100644
--- a/configs/_base_/backends/tensorrt_int8.py
+++ b/configs/_base_/backends/tensorrt_int8.py
@@ -1,5 +1,5 @@
 _base_ = ['./tensorrt.py']
 
-create_calib = True
-calib_params = dict(calib_file='calib_data.h5')
-tensorrt_params = dict(shared_params=dict(fp16_mode=True, int8_mode=True))
+backend_config = dict(common_config=dict(fp16_mode=True, int8_mode=True))
+
+calib_config = dict(create_calib=True, calib_file='calib_data.h5')
diff --git a/configs/_base_/onnx_config.py b/configs/_base_/onnx_config.py
new file mode 100644
index 000000000..bf48e7ab7
--- /dev/null
+++ b/configs/_base_/onnx_config.py
@@ -0,0 +1,9 @@
+onnx_config = dict(
+    type='onnx',
+    export_params=True,
+    keep_initializers_as_inputs=False,
+    opset_version=11,
+    save_file='end2end.onnx',
+    input_names=['input'],
+    output_names=['output'],
+    input_shape=None)
diff --git a/configs/_base_/torch2onnx.py b/configs/_base_/torch2onnx.py
deleted file mode 100644
index 66fb4dcf7..000000000
--- a/configs/_base_/torch2onnx.py
+++ /dev/null
@@ -1,5 +0,0 @@
-pytorch2onnx = dict(
-    export_params=True,
-    keep_initializers_as_inputs=False,
-    opset_version=11,
-    save_file='end2end.onnx')
diff --git a/configs/mmcls/base.py b/configs/mmcls/base.py
deleted file mode 100644
index 3861d87dd..000000000
--- a/configs/mmcls/base.py
+++ /dev/null
@@ -1,3 +0,0 @@
-_base_ = ['../_base_/torch2onnx.py']
-codebase = 'mmcls'
-pytorch2onnx = dict(input_names=['input'], output_names=['output'])
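The flat `codebase`/`pytorch2onnx` keys from the deleted per-codebase `base*.py` files are regrouped into the shared `onnx_config.py` added above plus a small per-task `codebase_config`. A minimal sketch of how the regrouped keys come back together once a leaf config is loaded (assuming mmcv is installed and the path is resolved from the repository root; the config file itself is one of those added below):

    # Illustrative usage only, not part of the patch: load one of the new
    # mmcls deploy configs and inspect the grouped sections after the
    # `_base_` files have been merged by mmcv.
    from mmcv import Config

    cfg = Config.fromfile('configs/mmcls/classification_onnxruntime_dynamic.py')
    print(cfg.backend_config)   # backend-specific options, e.g. type='onnxruntime'
    print(cfg.codebase_config)  # type='mmcls', task='Classification'
    print(cfg.onnx_config.save_file, cfg.onnx_config.opset_version)  # 'end2end.onnx', 11

Grouping backend, ONNX-export and codebase options under their own dicts lets a leaf config mix `_base_` files per backend instead of overriding flat top-level keys.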
diff --git a/configs/mmcls/base_dynamic.py b/configs/mmcls/base_dynamic.py
deleted file mode 100644
index 6a663f249..000000000
--- a/configs/mmcls/base_dynamic.py
+++ /dev/null
@@ -1,11 +0,0 @@
-_base_ = ['./base.py']
-pytorch2onnx = dict(dynamic_axes={
-    'input': {
-        0: 'batch',
-        2: 'height',
-        3: 'width'
-    },
-    'output': {
-        0: 'batch'
-    }
-})
diff --git a/configs/mmcls/classification_dynamic.py b/configs/mmcls/classification_dynamic.py
new file mode 100644
index 000000000..5c8019e5c
--- /dev/null
+++ b/configs/mmcls/classification_dynamic.py
@@ -0,0 +1,13 @@
+_base_ = ['./classification_static.py']
+
+onnx_config = dict(
+    dynamic_axes={
+        'input': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        },
+        'output': {
+            0: 'batch'
+        }
+    }, )
diff --git a/configs/mmcls/classification_ncnn_dynamic.py b/configs/mmcls/classification_ncnn_dynamic.py
new file mode 100644
index 000000000..f15a13e90
--- /dev/null
+++ b/configs/mmcls/classification_ncnn_dynamic.py
@@ -0,0 +1 @@
+_base_ = ['./classification_dynamic.py', '../_base_/backends/ncnn.py']
diff --git a/configs/mmcls/classification_ncnn_static.py b/configs/mmcls/classification_ncnn_static.py
new file mode 100644
index 000000000..40990a5cd
--- /dev/null
+++ b/configs/mmcls/classification_ncnn_static.py
@@ -0,0 +1,3 @@
+_base_ = ['./classification_static.py', '../_base_/backends/ncnn.py']
+
+onnx_config = dict(input_shape=None)
diff --git a/configs/mmcls/classification_onnxruntime_dynamic.py b/configs/mmcls/classification_onnxruntime_dynamic.py
new file mode 100644
index 000000000..f2a559434
--- /dev/null
+++ b/configs/mmcls/classification_onnxruntime_dynamic.py
@@ -0,0 +1 @@
+_base_ = ['./classification_dynamic.py', '../_base_/backends/onnxruntime.py']
diff --git a/configs/mmcls/classification_onnxruntime_static.py b/configs/mmcls/classification_onnxruntime_static.py
new file mode 100644
index 000000000..8a61ebe73
--- /dev/null
+++ b/configs/mmcls/classification_onnxruntime_static.py
@@ -0,0 +1,3 @@
+_base_ = ['./classification_static.py', '../_base_/backends/onnxruntime.py']
+
+onnx_config = dict(input_shape=None)
diff --git a/configs/mmcls/classification_ppl_dynamic.py b/configs/mmcls/classification_ppl_dynamic.py
new file mode 100644
index 000000000..15eb62c4e
--- /dev/null
+++ b/configs/mmcls/classification_ppl_dynamic.py
@@ -0,0 +1 @@
+_base_ = ['./classification_dynamic.py', '../_base_/backends/ppl.py']
diff --git a/configs/mmcls/classification_ppl_static.py b/configs/mmcls/classification_ppl_static.py
new file mode 100644
index 000000000..a5c577a69
--- /dev/null
+++ b/configs/mmcls/classification_ppl_static.py
@@ -0,0 +1,3 @@
+_base_ = ['./classification_static.py', '../_base_/backends/ppl.py']
+
+onnx_config = dict(input_shape=None)
diff --git a/configs/mmcls/classification_static.py b/configs/mmcls/classification_static.py
new file mode 100644
index 000000000..2cfeef524
--- /dev/null
+++ b/configs/mmcls/classification_static.py
@@ -0,0 +1,3 @@
+_base_ = ['../_base_/onnx_config.py']
+
+codebase_config = dict(type='mmcls', task='Classification')
diff --git a/configs/mmcls/classification_tensorrt_dynamic-224x224-224x224.py b/configs/mmcls/classification_tensorrt_dynamic-224x224-224x224.py
new file mode 100644
index 000000000..1e091713a
--- /dev/null
+++ b/configs/mmcls/classification_tensorrt_dynamic-224x224-224x224.py
@@ -0,0 +1,13 @@
+_base_ = ['./classification_dynamic.py', '../_base_/backends/tensorrt.py']
+
+onnx_config = dict(input_shape=[224, 224])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 224, 224],
+                    opt_shape=[4, 3, 224, 224],
+                    max_shape=[64, 3, 224, 224])))
+    ])
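For comparison, the deleted `configs/mmcls/tensorrt.py` and `tensorrt_int8.py` further below still use the old triple-list `opt_shape_dict`, while the new `model_inputs` entries name each end of the range. An illustrative, plain-data sketch of the mapping (not taken verbatim from the patch):

    # Old layout: one [min, opt, max] list of shapes per input name.
    old_style = dict(
        opt_shape_dict=dict(
            input=[[1, 3, 224, 224], [4, 3, 224, 224], [64, 3, 224, 224]]))
    # New layout: the same range spelled out with named keys, which is what
    # create_trt_engine() reads when it builds one TensorRT optimization
    # profile entry per input (see the tensorrt_utils.py hunk later in this diff).
    new_style = dict(
        input_shapes=dict(
            input=dict(
                min_shape=[1, 3, 224, 224],
                opt_shape=[4, 3, 224, 224],
                max_shape=[64, 3, 224, 224])))
    assert old_style['opt_shape_dict']['input'][1] == \
        new_style['input_shapes']['input']['opt_shape']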
diff --git a/configs/mmcls/classification_tensorrt_int8_dynamic-224x224-224x224.py b/configs/mmcls/classification_tensorrt_int8_dynamic-224x224-224x224.py
new file mode 100644
index 000000000..e41ddf116
--- /dev/null
+++ b/configs/mmcls/classification_tensorrt_int8_dynamic-224x224-224x224.py
@@ -0,0 +1,13 @@
+_base_ = ['./classification_dynamic.py', '../_base_/backends/tensorrt_int8.py']
+
+onnx_config = dict(input_shape=[224, 224])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 224, 224],
+                    opt_shape=[4, 3, 224, 224],
+                    max_shape=[64, 3, 224, 224])))
+    ])
diff --git a/configs/mmcls/classification_tensorrt_int8_static-224x224.py b/configs/mmcls/classification_tensorrt_int8_static-224x224.py
new file mode 100644
index 000000000..3fb5cc63e
--- /dev/null
+++ b/configs/mmcls/classification_tensorrt_int8_static-224x224.py
@@ -0,0 +1,13 @@
+_base_ = ['./classification_static.py', '../_base_/backends/tensorrt_int8.py']
+
+onnx_config = dict(input_shape=[224, 224])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 224, 224],
+                    opt_shape=[1, 3, 224, 224],
+                    max_shape=[1, 3, 224, 224])))
+    ])
diff --git a/configs/mmcls/classification_tensorrt_static-224x224.py b/configs/mmcls/classification_tensorrt_static-224x224.py
new file mode 100644
index 000000000..1b0d49f5d
--- /dev/null
+++ b/configs/mmcls/classification_tensorrt_static-224x224.py
@@ -0,0 +1,13 @@
+_base_ = ['./classification_static.py', '../_base_/backends/tensorrt.py']
+
+onnx_config = dict(input_shape=[224, 224])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 224, 224],
+                    opt_shape=[1, 3, 224, 224],
+                    max_shape=[1, 3, 224, 224])))
+    ])
diff --git a/configs/mmcls/ncnn.py b/configs/mmcls/ncnn.py
deleted file mode 100644
index 538444d32..000000000
--- a/configs/mmcls/ncnn.py
+++ /dev/null
@@ -1 +0,0 @@
-_base_ = ['./base_dynamic.py', '../_base_/backends/ncnn.py']
diff --git a/configs/mmcls/onnxruntime.py b/configs/mmcls/onnxruntime.py
deleted file mode 100644
index 89ca7f9a1..000000000
--- a/configs/mmcls/onnxruntime.py
+++ /dev/null
@@ -1 +0,0 @@
-_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py']
diff --git a/configs/mmcls/ppl.py b/configs/mmcls/ppl.py
deleted file mode 100644
index 7a81978b0..000000000
--- a/configs/mmcls/ppl.py
+++ /dev/null
@@ -1 +0,0 @@
-_base_ = ['./base_dynamic.py', '../_base_/backends/ppl.py']
diff --git a/configs/mmcls/tensorrt.py b/configs/mmcls/tensorrt.py
deleted file mode 100644
index 3dd11c3e9..000000000
--- a/configs/mmcls/tensorrt.py
+++ /dev/null
@@ -1,8 +0,0 @@
-_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt.py']
-tensorrt_params = dict(model_params=[
-    dict(
-        save_file='end2end.engine',
-        opt_shape_dict=dict(
-            input=[[1, 3, 224, 224], [4, 3, 224, 224], [64, 3, 224, 224]]),
-        max_workspace_size=1 << 30)
-])
diff --git a/configs/mmcls/tensorrt_int8.py b/configs/mmcls/tensorrt_int8.py
deleted file mode 100644
index af7123155..000000000
--- a/configs/mmcls/tensorrt_int8.py
+++ /dev/null
@@ -1,8 +0,0 @@
-_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt_int8.py']
-tensorrt_params = dict(model_params=[
-    dict(
-        save_file='end2end.engine',
-        opt_shape_dict=dict(
-            input=[[1, 3, 224, 224], [1, 3, 224, 224], [64, 3, 224, 224]]),
-        max_workspace_size=1 << 30)
-])
diff --git
a/configs/mmdet/base_dynamic.py b/configs/mmdet/_base_/base_dynamic.py similarity index 93% rename from configs/mmdet/base_dynamic.py rename to configs/mmdet/_base_/base_dynamic.py index 660e28be8..497db262f 100644 --- a/configs/mmdet/base_dynamic.py +++ b/configs/mmdet/_base_/base_dynamic.py @@ -1,5 +1,5 @@ _base_ = ['./base_static.py'] -pytorch2onnx = dict( +onnx_config = dict( dynamic_axes={ 'input': { 0: 'batch', diff --git a/configs/mmdet/_base_/base_static.py b/configs/mmdet/_base_/base_static.py new file mode 100644 index 000000000..2570694fd --- /dev/null +++ b/configs/mmdet/_base_/base_static.py @@ -0,0 +1,14 @@ +_base_ = ['../../_base_/onnx_config.py'] + +onnx_config = dict(output_names=['dets', 'labels'], input_shape=None) +codebase_config = dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=-1, + keep_top_k=100, + background_label_id=-1, + )) diff --git a/configs/mmdet/_base_/base_tensorrt_dynamic-320x320-1344x1344.py b/configs/mmdet/_base_/base_tensorrt_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..ea3308278 --- /dev/null +++ b/configs/mmdet/_base_/base_tensorrt_dynamic-320x320-1344x1344.py @@ -0,0 +1,12 @@ +_base_ = ['./base_dynamic.py', '../../_base_/backends/tensorrt.py'] + +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 320, 320], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 1344, 1344]))) + ]) diff --git a/configs/mmdet/_base_/base_tensorrt_int8_dynamic-320x320-1344x1344.py b/configs/mmdet/_base_/base_tensorrt_int8_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..d44d740d6 --- /dev/null +++ b/configs/mmdet/_base_/base_tensorrt_int8_dynamic-320x320-1344x1344.py @@ -0,0 +1,12 @@ +_base_ = ['./base_dynamic.py', '../../_base_/backends/tensorrt_int8.py'] + +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 320, 320], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 1344, 1344]))) + ]) diff --git a/configs/mmdet/_base_/base_tensorrt_int8_static-800x1344.py b/configs/mmdet/_base_/base_tensorrt_int8_static-800x1344.py new file mode 100644 index 000000000..c597b2d80 --- /dev/null +++ b/configs/mmdet/_base_/base_tensorrt_int8_static-800x1344.py @@ -0,0 +1,14 @@ +_base_ = ['./base_static.py', '../../_base_/backends/tensorrt_int8.py'] + +onnx_config = dict(input_shape=(1344, 800)) + +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 800, 1344], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 800, 1344]))) + ]) diff --git a/configs/mmdet/_base_/base_tensorrt_static-800x1344.py b/configs/mmdet/_base_/base_tensorrt_static-800x1344.py new file mode 100644 index 000000000..a83c8f961 --- /dev/null +++ b/configs/mmdet/_base_/base_tensorrt_static-800x1344.py @@ -0,0 +1,14 @@ +_base_ = ['./base_static.py', '../../_base_/backends/tensorrt.py'] + +onnx_config = dict(input_shape=(1344, 800)) + +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 800, 1344], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 800, 1344]))) + ]) diff --git a/configs/mmdet/mask_base.py b/configs/mmdet/_base_/mask_base_dynamic.py similarity index 79% rename 
from configs/mmdet/mask_base.py rename to configs/mmdet/_base_/mask_base_dynamic.py index ae0bad140..ea39abf2f 100644 --- a/configs/mmdet/mask_base.py +++ b/configs/mmdet/_base_/mask_base_dynamic.py @@ -1,6 +1,5 @@ -_base_ = ['./base_dynamic.py'] -pytorch2onnx = dict( - output_names=['dets', 'labels', 'masks'], +_base_ = ['./mask_base_static.py'] +onnx_config = dict( dynamic_axes={ 'input': { 0: 'batch', @@ -21,5 +20,4 @@ 2: 'height', 3: 'width' }, - }, -) + }) diff --git a/configs/mmdet/_base_/mask_base_static.py b/configs/mmdet/_base_/mask_base_static.py new file mode 100644 index 000000000..f866a326b --- /dev/null +++ b/configs/mmdet/_base_/mask_base_static.py @@ -0,0 +1,3 @@ +_base_ = ['./base_static.py'] + +onnx_config = dict(output_names=['dets', 'labels', 'masks']) diff --git a/configs/mmdet/base_static.py b/configs/mmdet/base_static.py deleted file mode 100644 index b90329f74..000000000 --- a/configs/mmdet/base_static.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = ['../_base_/torch2onnx.py'] -codebase = 'mmdet' -pytorch2onnx = dict( - input_names=['input'], - output_names=['dets', 'labels'], -) - -post_processing = dict( - score_threshold=0.05, - iou_threshold=0.5, - max_output_boxes_per_class=200, - pre_top_k=-1, - keep_top_k=100, - background_label_id=-1, -) diff --git a/configs/mmdet/mask_onnxruntime.py b/configs/mmdet/mask_onnxruntime.py deleted file mode 100644 index 262046b96..000000000 --- a/configs/mmdet/mask_onnxruntime.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./mask_base.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/mask_tensorrt.py b/configs/mmdet/mask_tensorrt.py deleted file mode 100644 index e2a44fe60..000000000 --- a/configs/mmdet/mask_tensorrt.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./mask_base.py', './tensorrt_base.py'] diff --git a/configs/mmdet/onnxruntime.py b/configs/mmdet/onnxruntime.py deleted file mode 100644 index 89ca7f9a1..000000000 --- a/configs/mmdet/onnxruntime.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/partition_single_stage.py b/configs/mmdet/partition_single_stage.py deleted file mode 100644 index 7bd2d28b8..000000000 --- a/configs/mmdet/partition_single_stage.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['./base_dynamic.py'] - -apply_marks = True - -partition_params = dict(partition_type='single_stage_base') diff --git a/configs/mmdet/partition_single_stage_onnxruntime.py b/configs/mmdet/partition_single_stage_onnxruntime.py deleted file mode 100644 index 7bb845c44..000000000 --- a/configs/mmdet/partition_single_stage_onnxruntime.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./partition_single_stage.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/partition_single_stage_static.py b/configs/mmdet/partition_single_stage_static.py deleted file mode 100644 index 547e9e4e2..000000000 --- a/configs/mmdet/partition_single_stage_static.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['./base_static.py'] - -apply_marks = True - -partition_params = dict(partition_type='single_stage_base') diff --git a/configs/mmdet/partition_single_stage_static_ncnn.py b/configs/mmdet/partition_single_stage_static_ncnn.py deleted file mode 100755 index dc64de07e..000000000 --- a/configs/mmdet/partition_single_stage_static_ncnn.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./partition_single_stage_static.py', '../_base_/backends/ncnn.py'] diff --git a/configs/mmdet/partition_two_stage.py b/configs/mmdet/partition_two_stage.py deleted file mode 100644 index 
4a192bf54..000000000 --- a/configs/mmdet/partition_two_stage.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['./base_dynamic.py'] - -apply_marks = True - -partition_params = dict(partition_type='two_stage_base') diff --git a/configs/mmdet/partition_two_stage_onnxruntime.py b/configs/mmdet/partition_two_stage_onnxruntime.py deleted file mode 100644 index d6a76e868..000000000 --- a/configs/mmdet/partition_two_stage_onnxruntime.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./partition_two_stage.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/partition_two_stage_static.py b/configs/mmdet/partition_two_stage_static.py deleted file mode 100644 index 3eebf745a..000000000 --- a/configs/mmdet/partition_two_stage_static.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['./base_static.py'] - -apply_marks = True - -partition_params = dict(partition_type='two_stage_base') diff --git a/configs/mmdet/partition_two_stage_static_ncnn.py b/configs/mmdet/partition_two_stage_static_ncnn.py deleted file mode 100755 index 9a07959c8..000000000 --- a/configs/mmdet/partition_two_stage_static_ncnn.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./partition_two_stage_static.py', '../_base_/backends/ncnn.py'] diff --git a/configs/mmdet/partition_two_stage_tensorrt_int8.py b/configs/mmdet/partition_two_stage_tensorrt_int8.py deleted file mode 100644 index 77b770185..000000000 --- a/configs/mmdet/partition_two_stage_tensorrt_int8.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = ['./partition_two_stage.py', '../_base_/backends/tensorrt_int8.py'] - -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 320, 320], [1, 3, 800, 1344], [1, 3, 1344, 1344]]), - max_workspace_size=1 << 30), - dict( - opt_shape_dict=dict(bbox_feats=[[500, 256, 7, 7], [1000, 256, 7, 7], - [2000, 256, 7, 7]]), - max_workspace_size=1 << 30) -]) diff --git a/configs/mmdet/ppl.py b/configs/mmdet/ppl.py deleted file mode 100644 index 7a81978b0..000000000 --- a/configs/mmdet/ppl.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/ppl.py'] diff --git a/configs/mmdet/single-stage/single-stage_onnxruntime_dynamic.py b/configs/mmdet/single-stage/single-stage_onnxruntime_dynamic.py new file mode 100644 index 000000000..14a4dc227 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_onnxruntime_dynamic.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_dynamic.py', '../../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/single-stage/single-stage_onnxruntime_static.py b/configs/mmdet/single-stage/single-stage_onnxruntime_static.py new file mode 100644 index 000000000..486a734d9 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_onnxruntime_static.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/single-stage/single-stage_partition_ncnn_static.py b/configs/mmdet/single-stage/single-stage_partition_ncnn_static.py new file mode 100644 index 000000000..b2a651733 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_partition_ncnn_static.py @@ -0,0 +1,3 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/ncnn.py'] + +partition_config = dict(type='single_stage', apply_marks=True) diff --git a/configs/mmdet/single-stage/single-stage_partition_onnxruntime_dynamic.py b/configs/mmdet/single-stage/single-stage_partition_onnxruntime_dynamic.py new file mode 100644 index 000000000..88c19f4e2 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_partition_onnxruntime_dynamic.py @@ -0,0 +1,3 @@ 
+_base_ = ['./single-stage_onnxruntime_dynamic.py'] + +partition_config = dict(type='single_stage', apply_marks=True) diff --git a/configs/mmdet/single-stage/single-stage_partition_onnxruntime_static.py b/configs/mmdet/single-stage/single-stage_partition_onnxruntime_static.py new file mode 100644 index 000000000..a43b45a18 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_partition_onnxruntime_static.py @@ -0,0 +1,3 @@ +_base_ = ['./single-stage_onnxruntime_static.py'] + +partition_config = dict(type='single_stage', apply_marks=True) diff --git a/configs/mmdet/single-stage/single-stage_ppl_dynamic.py b/configs/mmdet/single-stage/single-stage_ppl_dynamic.py new file mode 100644 index 000000000..5d8068fa3 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_ppl_dynamic.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_dynamic.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmdet/single-stage/single-stage_ppl_static.py b/configs/mmdet/single-stage/single-stage_ppl_static.py new file mode 100644 index 000000000..b2eaf0f97 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_ppl_static.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmdet/single-stage/single-stage_tensorrt_dynamic-320x320-1344x1344.py b/configs/mmdet/single-stage/single-stage_tensorrt_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..9eeb2c4cd --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_tensorrt_dynamic-320x320-1344x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_dynamic-320x320-1344x1344.py'] diff --git a/configs/mmdet/single-stage/single-stage_tensorrt_int8_dynamic-320x320-1344x1344.py b/configs/mmdet/single-stage/single-stage_tensorrt_int8_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..4153eaa83 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_tensorrt_int8_dynamic-320x320-1344x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_int8_dynamic-320x320-1344x1344.py'] diff --git a/configs/mmdet/single-stage/single-stage_tensorrt_int8_static-800x1344.py b/configs/mmdet/single-stage/single-stage_tensorrt_int8_static-800x1344.py new file mode 100644 index 000000000..12dd90c6e --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_tensorrt_int8_static-800x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_int8_static-800x1344.py'] diff --git a/configs/mmdet/single-stage/single-stage_tensorrt_static-800x1344.py b/configs/mmdet/single-stage/single-stage_tensorrt_static-800x1344.py new file mode 100644 index 000000000..737054533 --- /dev/null +++ b/configs/mmdet/single-stage/single-stage_tensorrt_static-800x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_static-800x1344.py'] diff --git a/configs/mmdet/tensorrt.py b/configs/mmdet/tensorrt.py deleted file mode 100644 index 35e3a9210..000000000 --- a/configs/mmdet/tensorrt.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', './tensorrt_base.py'] diff --git a/configs/mmdet/tensorrt_base.py b/configs/mmdet/tensorrt_base.py deleted file mode 100644 index 968b7741b..000000000 --- a/configs/mmdet/tensorrt_base.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['../_base_/backends/tensorrt.py'] -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 320, 320], [1, 3, 800, 1344], [1, 3, 1344, 1344]]), - max_workspace_size=1 << 30) -]) diff --git a/configs/mmdet/tensorrt_int8.py b/configs/mmdet/tensorrt_int8.py deleted file mode 100644 index 910fdc4a7..000000000 --- 
a/configs/mmdet/tensorrt_int8.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', './tensorrt_int8_base.py'] diff --git a/configs/mmdet/tensorrt_int8_base.py b/configs/mmdet/tensorrt_int8_base.py deleted file mode 100644 index a4feaca42..000000000 --- a/configs/mmdet/tensorrt_int8_base.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['../_base_/backends/tensorrt_int8.py'] -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 320, 320], [1, 3, 800, 1344], [1, 3, 1344, 1344]]), - max_workspace_size=1 << 30) -]) diff --git a/configs/mmdet/two-stage/two-stage_onnxruntime_dynamic.py b/configs/mmdet/two-stage/two-stage_onnxruntime_dynamic.py new file mode 100644 index 000000000..14a4dc227 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_onnxruntime_dynamic.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_dynamic.py', '../../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/two-stage/two-stage_onnxruntime_static.py b/configs/mmdet/two-stage/two-stage_onnxruntime_static.py new file mode 100644 index 000000000..486a734d9 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_onnxruntime_static.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/onnxruntime.py'] diff --git a/configs/mmdet/two-stage/two-stage_partition_ncnn_static.py b/configs/mmdet/two-stage/two-stage_partition_ncnn_static.py new file mode 100644 index 000000000..2213609de --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_partition_ncnn_static.py @@ -0,0 +1,3 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/ncnn.py'] + +partition_config = dict(type='two_stage', apply_marks=True) diff --git a/configs/mmdet/two-stage/two-stage_partition_onnxruntime_dynamic.py b/configs/mmdet/two-stage/two-stage_partition_onnxruntime_dynamic.py new file mode 100644 index 000000000..2b3878206 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_partition_onnxruntime_dynamic.py @@ -0,0 +1,3 @@ +_base_ = ['./two-stage_onnxruntime_dynamic.py'] + +partition_config = dict(type='two_stage', apply_marks=True) diff --git a/configs/mmdet/two-stage/two-stage_partition_onnxruntime_static.py b/configs/mmdet/two-stage/two-stage_partition_onnxruntime_static.py new file mode 100644 index 000000000..43cc40bc3 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_partition_onnxruntime_static.py @@ -0,0 +1,3 @@ +_base_ = ['./two-stage_onnxruntime_static.py'] + +partition_config = dict(type='two_stage', apply_marks=True) diff --git a/configs/mmdet/two-stage/two-stage_partition_tensorrt-int8_dynamic-320x320-1344x1344.py b/configs/mmdet/two-stage/two-stage_partition_tensorrt-int8_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..7ae5fcb94 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_partition_tensorrt-int8_dynamic-320x320-1344x1344.py @@ -0,0 +1,19 @@ +_base_ = ['./two-stage_tensorrt_int8_dynamic-320x320-1344x1344.py'] + +partition_config = dict(type='two_stage', apply_marks=True) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 320, 320], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 1344, 1344]))), + dict( + input_shapes=dict( + bbox_feats=dict( + min_shape=[500, 256, 7, 7], + opt_shape=[1000, 256, 7, 7], + max_shape=[2000, 256, 7, 7]))) + ]) diff --git a/configs/mmdet/two-stage/two-stage_partition_tensorrt_dynamic-320x320-1344x1344.py b/configs/mmdet/two-stage/two-stage_partition_tensorrt_dynamic-320x320-1344x1344.py new file mode 100644 index 
000000000..d65a7e977 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_partition_tensorrt_dynamic-320x320-1344x1344.py @@ -0,0 +1,19 @@ +_base_ = ['./two-stage_tensorrt_dynamic-320x320-1344x1344.py'] + +partition_config = dict(type='two_stage', apply_marks=True) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 320, 320], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 1344, 1344]))), + dict( + input_shapes=dict( + bbox_feats=dict( + min_shape=[500, 256, 7, 7], + opt_shape=[1000, 256, 7, 7], + max_shape=[2000, 256, 7, 7]))) + ]) diff --git a/configs/mmdet/two-stage/two-stage_ppl_dynamic.py b/configs/mmdet/two-stage/two-stage_ppl_dynamic.py new file mode 100644 index 000000000..5d8068fa3 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_ppl_dynamic.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_dynamic.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmdet/two-stage/two-stage_ppl_static.py b/configs/mmdet/two-stage/two-stage_ppl_static.py new file mode 100644 index 000000000..b2eaf0f97 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_ppl_static.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmdet/two-stage/two-stage_tensorrt_dynamic-320x320-1344x1344.py b/configs/mmdet/two-stage/two-stage_tensorrt_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..9eeb2c4cd --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_tensorrt_dynamic-320x320-1344x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_dynamic-320x320-1344x1344.py'] diff --git a/configs/mmdet/two-stage/two-stage_tensorrt_int8_dynamic-320x320-1344x1344.py b/configs/mmdet/two-stage/two-stage_tensorrt_int8_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..4153eaa83 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_tensorrt_int8_dynamic-320x320-1344x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_int8_dynamic-320x320-1344x1344.py'] diff --git a/configs/mmdet/two-stage/two-stage_tensorrt_int8_static-800x1344.py b/configs/mmdet/two-stage/two-stage_tensorrt_int8_static-800x1344.py new file mode 100644 index 000000000..12dd90c6e --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_tensorrt_int8_static-800x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_int8_static-800x1344.py'] diff --git a/configs/mmdet/two-stage/two-stage_tensorrt_static-800x1344.py b/configs/mmdet/two-stage/two-stage_tensorrt_static-800x1344.py new file mode 100644 index 000000000..737054533 --- /dev/null +++ b/configs/mmdet/two-stage/two-stage_tensorrt_static-800x1344.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_tensorrt_static-800x1344.py'] diff --git a/configs/mmdet/with-mask/mask_onnxruntime_dynamic.py b/configs/mmdet/with-mask/mask_onnxruntime_dynamic.py new file mode 100644 index 000000000..d8dfd5d92 --- /dev/null +++ b/configs/mmdet/with-mask/mask_onnxruntime_dynamic.py @@ -0,0 +1,3 @@ +_base_ = [ + '../_base_/mask_base_dynamic.py', '../../_base_/backends/onnxruntime.py' +] diff --git a/configs/mmdet/with-mask/mask_onnxruntime_static.py b/configs/mmdet/with-mask/mask_onnxruntime_static.py new file mode 100644 index 000000000..c101bfa83 --- /dev/null +++ b/configs/mmdet/with-mask/mask_onnxruntime_static.py @@ -0,0 +1,3 @@ +_base_ = [ + '../_base_/mask_base_static.py', '../../_base_/backends/onnxruntime.py' +] diff --git a/configs/mmdet/with-mask/mask_ppl_dynamic.py b/configs/mmdet/with-mask/mask_ppl_dynamic.py new file mode 
100644 index 000000000..b4b700557 --- /dev/null +++ b/configs/mmdet/with-mask/mask_ppl_dynamic.py @@ -0,0 +1 @@ +_base_ = ['../_base_/mask_base_dynamic.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmdet/with-mask/mask_ppl_static.py b/configs/mmdet/with-mask/mask_ppl_static.py new file mode 100644 index 000000000..aa79aa7d2 --- /dev/null +++ b/configs/mmdet/with-mask/mask_ppl_static.py @@ -0,0 +1 @@ +_base_ = ['../_base_/mask_base_static.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmdet/with-mask/mask_tensorrt_dynamic-320x320-1344x1344.py b/configs/mmdet/with-mask/mask_tensorrt_dynamic-320x320-1344x1344.py new file mode 100644 index 000000000..bfbe16cb2 --- /dev/null +++ b/configs/mmdet/with-mask/mask_tensorrt_dynamic-320x320-1344x1344.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/mask_base_dynamic.py', '../../_base_/backends/tensorrt.py' +] + +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 320, 320], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 1344, 1344]))) + ]) diff --git a/configs/mmdet/with-mask/mask_tensorrt_static-800x1344.py b/configs/mmdet/with-mask/mask_tensorrt_static-800x1344.py new file mode 100644 index 000000000..7883ccb7a --- /dev/null +++ b/configs/mmdet/with-mask/mask_tensorrt_static-800x1344.py @@ -0,0 +1,13 @@ +_base_ = ['../_base_/mask_base_static.py', '../../_base_/backends/tensorrt.py'] + +onnx_config = dict(input_shape=(1344, 800)) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 800, 1344], + opt_shape=[1, 3, 800, 1344], + max_shape=[1, 3, 800, 1344]))) + ]) diff --git a/configs/mmedit/restorer_onnxruntime_dynamic.py b/configs/mmedit/restorer_onnxruntime_dynamic.py deleted file mode 100644 index 89ca7f9a1..000000000 --- a/configs/mmedit/restorer_onnxruntime_dynamic.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmedit/restorer_ppl_dynamic.py b/configs/mmedit/restorer_ppl_dynamic.py deleted file mode 100644 index 7a81978b0..000000000 --- a/configs/mmedit/restorer_ppl_dynamic.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/ppl.py'] diff --git a/configs/mmedit/restorer_tensorrt_dynamic.py b/configs/mmedit/restorer_tensorrt_dynamic.py deleted file mode 100644 index d698e1771..000000000 --- a/configs/mmedit/restorer_tensorrt_dynamic.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt.py'] -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 32, 32], [1, 3, 32, 32], [1, 3, 512, 512]]), - max_workspace_size=1 << 30) -]) diff --git a/configs/mmedit/base_dynamic.py b/configs/mmedit/super-resolution/super-resolution_dynamic.py similarity index 62% rename from configs/mmedit/base_dynamic.py rename to configs/mmedit/super-resolution/super-resolution_dynamic.py index 4b2847ac1..381e4231d 100644 --- a/configs/mmedit/base_dynamic.py +++ b/configs/mmedit/super-resolution/super-resolution_dynamic.py @@ -1,8 +1,5 @@ -_base_ = ['../_base_/torch2onnx.py'] -codebase = 'mmedit' -pytorch2onnx = dict( - input_names=['input'], - output_names=['output'], +_base_ = ['./super-resolution_static.py'] +onnx_config = dict( dynamic_axes={ 'input': { 0: 'batch', @@ -15,4 +12,4 @@ 3: 'width' } }, -) + input_shape=None) diff --git 
a/configs/mmedit/super-resolution/super-resolution_onnxruntime_dynamic.py b/configs/mmedit/super-resolution/super-resolution_onnxruntime_dynamic.py new file mode 100644 index 000000000..bda09c044 --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_onnxruntime_dynamic.py @@ -0,0 +1,3 @@ +_base_ = [ + './super-resolution_dynamic.py', '../../_base_/backends/onnxruntime.py' +] diff --git a/configs/mmedit/super-resolution/super-resolution_onnxruntime_static-256x256.py b/configs/mmedit/super-resolution/super-resolution_onnxruntime_static-256x256.py new file mode 100644 index 000000000..49616775a --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_onnxruntime_static-256x256.py @@ -0,0 +1,5 @@ +_base_ = [ + './super-resolution_static.py', '../../_base_/backends/onnxruntime.py' +] + +onnx_config = dict(input_shape=[256, 256]) diff --git a/configs/mmedit/super-resolution/super-resolution_ppl_dynamic.py b/configs/mmedit/super-resolution/super-resolution_ppl_dynamic.py new file mode 100644 index 000000000..96fc23f4e --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_ppl_dynamic.py @@ -0,0 +1 @@ +_base_ = ['./super-resolution_dynamic.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmedit/super-resolution/super-resolution_ppl_static.py b/configs/mmedit/super-resolution/super-resolution_ppl_static.py new file mode 100644 index 000000000..4916cda4e --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_ppl_static.py @@ -0,0 +1,3 @@ +_base_ = ['./super-resolution_static.py', '../../_base_/backends/ppl.py'] + +onnx_config = dict(input_shape=[256, 256]) diff --git a/configs/mmedit/super-resolution/super-resolution_static.py b/configs/mmedit/super-resolution/super-resolution_static.py new file mode 100644 index 000000000..6eeb3d112 --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_static.py @@ -0,0 +1,2 @@ +_base_ = ['../../_base_/onnx_config.py'] +codebase_config = dict(type='mmedit', task='SuperResolution') diff --git a/configs/mmedit/super-resolution/super-resolution_tensorrt_dynamic-32x32-512x512.py b/configs/mmedit/super-resolution/super-resolution_tensorrt_dynamic-32x32-512x512.py new file mode 100644 index 000000000..932cabf9e --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_tensorrt_dynamic-32x32-512x512.py @@ -0,0 +1,9 @@ +_base_ = ['./super-resolution_dynamic.py', '../../_base_/backends/tensorrt.py'] +backend_config = dict(model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 32, 32], + opt_shape=[1, 3, 256, 256], + max_shape=[1, 3, 512, 512]))) +]) diff --git a/configs/mmedit/super-resolution/super-resolution_tensorrt_static-256x256.py b/configs/mmedit/super-resolution/super-resolution_tensorrt_static-256x256.py new file mode 100644 index 000000000..2b1ac65be --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_tensorrt_static-256x256.py @@ -0,0 +1,10 @@ +_base_ = ['./super-resolution_static.py', '../../_base_/backends/tensorrt.py'] +onnx_config = dict(input_shape=[256, 256]) +backend_config = dict(model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 256, 256], + opt_shape=[1, 3, 256, 256], + max_shape=[1, 3, 256, 256]))) +]) diff --git a/configs/mmocr/base.py b/configs/mmocr/base.py deleted file mode 100644 index 823f59e1c..000000000 --- a/configs/mmocr/base.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = ['../_base_/torch2onnx.py'] -codebase = 'mmocr' - -# 'TextDetection' or 'TextRecognition' -task = 'TextDetection' - 
-pytorch2onnx = dict( - input_names=['input'], - output_names=['output'], - dynamic_axes={'input': { - 0: 'batch', - 2: 'height', - 3: 'width' - }}) - -if task == 'TextRecognition': - pytorch2onnx['dynamic_axes'] = {'input': {0: 'batch', 3: 'width'}} diff --git a/configs/mmocr/base_static.py b/configs/mmocr/base_static.py deleted file mode 100644 index a0fbaacdf..000000000 --- a/configs/mmocr/base_static.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = ['../_base_/torch2onnx.py'] -codebase = 'mmocr' - -# 'TextDetection' or 'TextRecognition' -task = 'TextDetection' -pytorch2onnx = dict(input_names=['input'], output_names=['output']) diff --git a/configs/mmocr/ncnn.py b/configs/mmocr/ncnn.py deleted file mode 100644 index 99d2106ec..000000000 --- a/configs/mmocr/ncnn.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_static.py', '../_base_/backends/ncnn.py'] diff --git a/configs/mmocr/onnxruntime.py b/configs/mmocr/onnxruntime.py deleted file mode 100644 index 83544d08b..000000000 --- a/configs/mmocr/onnxruntime.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmocr/ppl.py b/configs/mmocr/ppl.py deleted file mode 100644 index a644bf326..000000000 --- a/configs/mmocr/ppl.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base.py', '../_base_/backends/ppl.py'] diff --git a/configs/mmocr/tensorrt.py b/configs/mmocr/tensorrt.py deleted file mode 100644 index 57b975f4e..000000000 --- a/configs/mmocr/tensorrt.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['./base.py', '../_base_/backends/tensorrt.py'] -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 320, 320], [1, 3, 1024, 1824], [1, 3, 1024, 1824]]), - max_workspace_size=1 << 30) -]) diff --git a/configs/mmocr/text-detection/text-detection_dynamic.py b/configs/mmocr/text-detection/text-detection_dynamic.py new file mode 100644 index 000000000..27287e135 --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_dynamic.py @@ -0,0 +1,14 @@ +_base_ = ['./text-detection_static.py'] +onnx_config = dict( + dynamic_axes={ + 'input': { + 0: 'batch', + 2: 'height', + 3: 'width' + }, + 'output': { + 0: 'batch', + 2: 'height', + 3: 'width' + } + }, ) diff --git a/configs/mmocr/text-detection/text-detection_ncnn_static.py b/configs/mmocr/text-detection/text-detection_ncnn_static.py new file mode 100644 index 000000000..b6f1884a3 --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_ncnn_static.py @@ -0,0 +1,3 @@ +_base_ = ['./text-detection_static.py', '../../_base_/backends/ncnn.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmdet/mask_ppl.py b/configs/mmocr/text-detection/text-detection_onnxruntime_dynamic.py similarity index 65% rename from configs/mmdet/mask_ppl.py rename to configs/mmocr/text-detection/text-detection_onnxruntime_dynamic.py index 680b969b5..3ec6ecb1a 100644 --- a/configs/mmdet/mask_ppl.py +++ b/configs/mmocr/text-detection/text-detection_onnxruntime_dynamic.py @@ -1 +1,3 @@ -_base_ = ['./mask_base.py', '../_base_/backends/ppl.py'] +_base_ = [ + './text-detection_dynamic.py', '../../_base_/backends/onnxruntime.py' +] diff --git a/configs/mmocr/text-detection/text-detection_onnxruntime_static.py b/configs/mmocr/text-detection/text-detection_onnxruntime_static.py new file mode 100644 index 000000000..3215b465a --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_onnxruntime_static.py @@ -0,0 +1,3 @@ +_base_ = ['./text-detection_static.py', '../../_base_/backends/onnxruntime.py'] + +onnx_config = 
dict(input_shape=None) diff --git a/configs/mmocr/text-detection/text-detection_ppl_dynamic.py b/configs/mmocr/text-detection/text-detection_ppl_dynamic.py new file mode 100644 index 000000000..8e9bf8a02 --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_ppl_dynamic.py @@ -0,0 +1 @@ +_base_ = ['./text-detection_dynamic.py', '../../_base_/backends/ppl.py'] diff --git a/configs/mmocr/text-detection/text-detection_ppl_static.py b/configs/mmocr/text-detection/text-detection_ppl_static.py new file mode 100644 index 000000000..5bc4bdd9a --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_ppl_static.py @@ -0,0 +1,3 @@ +_base_ = ['./text-detection_static.py', '../../_base_/backends/ppl.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmocr/text-detection/text-detection_static.py b/configs/mmocr/text-detection/text-detection_static.py new file mode 100644 index 000000000..a4158399d --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_static.py @@ -0,0 +1,2 @@ +_base_ = ['../../_base_/onnx_config.py'] +codebase_config = dict(type='mmocr', task='TextDetection') diff --git a/configs/mmocr/text-detection/text-detection_tensorrt_dynamic-320x320-1024x1824.py b/configs/mmocr/text-detection/text-detection_tensorrt_dynamic-320x320-1024x1824.py new file mode 100644 index 000000000..65c4e4666 --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_tensorrt_dynamic-320x320-1024x1824.py @@ -0,0 +1,11 @@ +_base_ = ['./text-detection_dynamic.py', '../../_base_/backends/tensorrt.py'] +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 320, 320], + opt_shape=[1, 3, 600, 800], + max_shape=[1, 3, 1024, 1824]))) + ]) diff --git a/configs/mmocr/text-detection/text-detection_tensorrt_static-512x512.py b/configs/mmocr/text-detection/text-detection_tensorrt_static-512x512.py new file mode 100644 index 000000000..353fb531d --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_tensorrt_static-512x512.py @@ -0,0 +1,13 @@ +_base_ = ['./text-detection_static.py', '../../_base_/backends/tensorrt.py'] + +onnx_config = dict(input_shape=[512, 512]) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 512, 512], + opt_shape=[1, 3, 512, 512], + max_shape=[1, 3, 512, 512]))) + ]) diff --git a/configs/mmocr/text-recognition/text-recognition_dynamic.py b/configs/mmocr/text-recognition/text-recognition_dynamic.py new file mode 100644 index 000000000..4ba340602 --- /dev/null +++ b/configs/mmocr/text-recognition/text-recognition_dynamic.py @@ -0,0 +1,12 @@ +_base_ = ['./text-recognition_static.py'] +onnx_config = dict( + dynamic_axes={ + 'input': { + 0: 'batch', + 3: 'width' + }, + 'output': { + 0: 'batch', + 3: 'width' + } + }, ) diff --git a/configs/mmocr/text-recognition/text-recognition_ncnn_static.py b/configs/mmocr/text-recognition/text-recognition_ncnn_static.py new file mode 100644 index 000000000..9abbf2093 --- /dev/null +++ b/configs/mmocr/text-recognition/text-recognition_ncnn_static.py @@ -0,0 +1,3 @@ +_base_ = ['./text-recognition_static.py', '../../_base_/backends/ncnn.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmocr/text-recognition/text-recognition_onnxruntime_dynamic.py b/configs/mmocr/text-recognition/text-recognition_onnxruntime_dynamic.py new file mode 100644 index 000000000..757e79ced --- /dev/null +++ 
b/configs/mmocr/text-recognition/text-recognition_onnxruntime_dynamic.py @@ -0,0 +1,3 @@ +_base_ = [ + './text-recognition_dynamic.py', '../../_base_/backends/onnxruntime.py' +] diff --git a/configs/mmocr/text-recognition/text-recognition_onnxruntime_static.py b/configs/mmocr/text-recognition/text-recognition_onnxruntime_static.py new file mode 100644 index 000000000..615b6441f --- /dev/null +++ b/configs/mmocr/text-recognition/text-recognition_onnxruntime_static.py @@ -0,0 +1,5 @@ +_base_ = [ + './text-recognition_static.py', '../../_base_/backends/onnxruntime.py' +] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmocr/text-recognition/text-recognition_static.py b/configs/mmocr/text-recognition/text-recognition_static.py new file mode 100644 index 000000000..6a029fcd5 --- /dev/null +++ b/configs/mmocr/text-recognition/text-recognition_static.py @@ -0,0 +1,2 @@ +_base_ = ['../../_base_/onnx_config.py'] +codebase_config = dict(type='mmocr', task='TextRecognition') diff --git a/configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-32x32-32x640.py b/configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-32x32-32x640.py new file mode 100644 index 000000000..6fca1265a --- /dev/null +++ b/configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-32x32-32x640.py @@ -0,0 +1,11 @@ +_base_ = ['./text-recognition_dynamic.py', '../../_base_/backends/tensorrt.py'] +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 1, 32, 32], + opt_shape=[1, 1, 32, 64], + max_shape=[1, 1, 32, 640]))) + ]) diff --git a/configs/mmocr/text-recognition/text-recognition_tensorrt_static-32x32.py b/configs/mmocr/text-recognition/text-recognition_tensorrt_static-32x32.py new file mode 100644 index 000000000..a7e653c8a --- /dev/null +++ b/configs/mmocr/text-recognition/text-recognition_tensorrt_static-32x32.py @@ -0,0 +1,13 @@ +_base_ = ['./text-recognition_static.py', '../../_base_/backends/tensorrt.py'] + +onnx_config = dict(input_shape=[32, 32]) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 1, 32, 32], + opt_shape=[1, 1, 32, 32], + max_shape=[1, 1, 32, 32]))) + ]) diff --git a/configs/mmseg/base.py b/configs/mmseg/base.py deleted file mode 100644 index 1aec4d154..000000000 --- a/configs/mmseg/base.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = ['../_base_/torch2onnx.py'] -codebase = 'mmseg' -pytorch2onnx = dict( - input_names=['input'], - output_names=['output'], -) diff --git a/configs/mmseg/ncnn.py b/configs/mmseg/ncnn.py deleted file mode 100644 index 896944830..000000000 --- a/configs/mmseg/ncnn.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base.py', '../_base_/backends/ncnn.py'] diff --git a/configs/mmseg/onnxruntime.py b/configs/mmseg/onnxruntime.py deleted file mode 100644 index 83544d08b..000000000 --- a/configs/mmseg/onnxruntime.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmseg/onnxruntime_dynamic.py b/configs/mmseg/onnxruntime_dynamic.py deleted file mode 100644 index 89ca7f9a1..000000000 --- a/configs/mmseg/onnxruntime_dynamic.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmseg/ppl.py b/configs/mmseg/ppl.py deleted file mode 100644 index a644bf326..000000000 --- a/configs/mmseg/ppl.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base.py', 
'../_base_/backends/ppl.py'] diff --git a/configs/mmseg/ppl_dynamic.py b/configs/mmseg/ppl_dynamic.py deleted file mode 100644 index 7a81978b0..000000000 --- a/configs/mmseg/ppl_dynamic.py +++ /dev/null @@ -1 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/ppl.py'] diff --git a/configs/mmseg/base_dynamic.py b/configs/mmseg/segmentation_dynamic.py similarity index 80% rename from configs/mmseg/base_dynamic.py rename to configs/mmseg/segmentation_dynamic.py index 4e0cf26f5..c12860e85 100644 --- a/configs/mmseg/base_dynamic.py +++ b/configs/mmseg/segmentation_dynamic.py @@ -1,5 +1,5 @@ -_base_ = ['./base.py'] -pytorch2onnx = dict( +_base_ = ['./segmentation_static.py'] +onnx_config = dict( dynamic_axes={ 'input': { 0: 'batch', diff --git a/configs/mmseg/segmentation_ncnn_static.py b/configs/mmseg/segmentation_ncnn_static.py new file mode 100644 index 000000000..985542b5b --- /dev/null +++ b/configs/mmseg/segmentation_ncnn_static.py @@ -0,0 +1,3 @@ +_base_ = ['./segmentation_static.py', '../_base_/backends/ncnn.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmseg/segmentation_onnxruntime_dynamic.py b/configs/mmseg/segmentation_onnxruntime_dynamic.py new file mode 100644 index 000000000..5553ee373 --- /dev/null +++ b/configs/mmseg/segmentation_onnxruntime_dynamic.py @@ -0,0 +1 @@ +_base_ = ['./segmentation_dynamic.py', '../_base_/backends/onnxruntime.py'] diff --git a/configs/mmseg/segmentation_onnxruntime_static.py b/configs/mmseg/segmentation_onnxruntime_static.py new file mode 100644 index 000000000..802eb08a4 --- /dev/null +++ b/configs/mmseg/segmentation_onnxruntime_static.py @@ -0,0 +1,3 @@ +_base_ = ['./segmentation_static.py', '../_base_/backends/onnxruntime.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmseg/segmentation_ppl_dynamic.py b/configs/mmseg/segmentation_ppl_dynamic.py new file mode 100644 index 000000000..c45dd6233 --- /dev/null +++ b/configs/mmseg/segmentation_ppl_dynamic.py @@ -0,0 +1 @@ +_base_ = ['./segmentation_dynamic.py', '../_base_/backends/ppl.py'] diff --git a/configs/mmseg/segmentation_ppl_static.py b/configs/mmseg/segmentation_ppl_static.py new file mode 100644 index 000000000..c809ad3df --- /dev/null +++ b/configs/mmseg/segmentation_ppl_static.py @@ -0,0 +1,3 @@ +_base_ = ['./segmentation_static.py', '../_base_/backends/ppl.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmseg/segmentation_static.py b/configs/mmseg/segmentation_static.py new file mode 100644 index 000000000..434a8fae9 --- /dev/null +++ b/configs/mmseg/segmentation_static.py @@ -0,0 +1,2 @@ +_base_ = ['../_base_/onnx_config.py'] +codebase_config = dict(type='mmseg', task='Segmentation') diff --git a/configs/mmseg/segmentation_tensorrt_dynamic-512x1024-2048x2048.py b/configs/mmseg/segmentation_tensorrt_dynamic-512x1024-2048x2048.py new file mode 100644 index 000000000..90f978cd7 --- /dev/null +++ b/configs/mmseg/segmentation_tensorrt_dynamic-512x1024-2048x2048.py @@ -0,0 +1,11 @@ +_base_ = ['./segmentation_dynamic.py', '../_base_/backends/tensorrt.py'] +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 512, 1024], + opt_shape=[1, 3, 1024, 2048], + max_shape=[1, 3, 2048, 2048]))) + ]) diff --git a/configs/mmseg/segmentation_tensorrt_static-512x1024.py b/configs/mmseg/segmentation_tensorrt_static-512x1024.py new file mode 100644 index 000000000..be8379d4f --- /dev/null +++ b/configs/mmseg/segmentation_tensorrt_static-512x1024.py @@ 
-0,0 +1,13 @@ +_base_ = ['./segmentation_static.py', '../_base_/backends/tensorrt.py'] + +onnx_config = dict(input_shape=[1024, 512]) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 512, 1024], + opt_shape=[1, 3, 512, 1024], + max_shape=[1, 3, 512, 1024]))) + ]) diff --git a/configs/mmseg/tensorrt.py b/configs/mmseg/tensorrt.py deleted file mode 100644 index 196567ca0..000000000 --- a/configs/mmseg/tensorrt.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['./base.py', '../_base_/backends/tensorrt.py'] -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 512, 1024], [1, 3, 512, 1024], [1, 3, 512, 1024]]), - max_workspace_size=1 << 30) -]) diff --git a/configs/mmseg/tensorrt_dynamic.py b/configs/mmseg/tensorrt_dynamic.py deleted file mode 100644 index 9473ba235..000000000 --- a/configs/mmseg/tensorrt_dynamic.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['./base_dynamic.py', '../_base_/backends/tensorrt.py'] -tensorrt_params = dict(model_params=[ - dict( - opt_shape_dict=dict( - input=[[1, 3, 512, 512], [1, 3, 1024, 2048], [1, 3, 2048, 2048]]), - max_workspace_size=1 << 30) -]) diff --git a/mmdeploy/apis/calibration.py b/mmdeploy/apis/calibration.py index 3418f1fcf..999bf8b70 100644 --- a/mmdeploy/apis/calibration.py +++ b/mmdeploy/apis/calibration.py @@ -7,7 +7,7 @@ from mmdeploy.core import (RewriterContext, patch_model, reset_mark_function_count) -from mmdeploy.utils import get_codebase, load_config +from mmdeploy.utils import cfg_apply_marks, get_codebase, load_config from .utils import (build_dataloader, build_dataset, get_tensor_from_input, init_pytorch_model, run_inference) @@ -37,7 +37,7 @@ def create_calib_table(calib_file: str, dataset_cfg = load_config(dataset_cfg)[0] codebase = get_codebase(deploy_cfg) - apply_marks = deploy_cfg.get('apply_marks', False) + apply_marks = cfg_apply_marks(deploy_cfg) backend = 'default' model = init_pytorch_model( codebase, model_cfg, model_checkpoint, device=device) diff --git a/mmdeploy/apis/inference.py b/mmdeploy/apis/inference.py index a5fe7a04e..3b2550a68 100644 --- a/mmdeploy/apis/inference.py +++ b/mmdeploy/apis/inference.py @@ -2,7 +2,8 @@ import torch -from mmdeploy.utils import Backend, get_backend, get_codebase, load_config +from mmdeploy.utils import (Backend, get_backend, get_codebase, + get_input_shape, get_task_type, load_config) from .utils import (create_input, init_backend_model, init_pytorch_model, run_inference, visualize) @@ -19,6 +20,8 @@ def inference_model(model_cfg, deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) codebase = get_codebase(deploy_cfg) + task = get_task_type(deploy_cfg) + input_shape = get_input_shape(deploy_cfg) if backend is None: backend = get_backend(deploy_cfg) @@ -35,7 +38,8 @@ def inference_model(model_cfg, deploy_cfg=deploy_cfg, device_id=device_id) - model_inputs, _ = create_input(codebase, model_cfg, img, device) + model_inputs, _ = create_input(codebase, task, model_cfg, img, input_shape, + device) with torch.no_grad(): result = run_inference(codebase, model_inputs, model) diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py index 74cda6ebc..7792cdf30 100644 --- a/mmdeploy/apis/pytorch2onnx.py +++ b/mmdeploy/apis/pytorch2onnx.py @@ -6,7 +6,8 @@ from mmdeploy.core import (RewriterContext, patch_model, register_extra_symbolics) -from mmdeploy.utils import get_backend, get_codebase, load_config +from mmdeploy.utils import (get_backend, 
get_codebase, get_input_shape, + get_onnx_config, get_task_type, load_config) from .utils import create_input, init_pytorch_model @@ -19,7 +20,7 @@ def torch2onnx_impl(model: torch.nn.Module, input: torch.Tensor, raise TypeError('deploy_cfg must be a filename or Config object, ' f'but got {type(deploy_cfg)}') - pytorch2onnx_cfg = deploy_cfg['pytorch2onnx'] + pytorch2onnx_cfg = get_onnx_config(deploy_cfg) backend = get_backend(deploy_cfg).value opset_version = pytorch2onnx_cfg.get('opset_version', 11) @@ -53,15 +54,17 @@ def torch2onnx(img: Any, # load deploy_cfg if necessary deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) - mmcv.mkdir_or_exist(osp.abspath(work_dir)) output_file = osp.join(work_dir, save_file) codebase = get_codebase(deploy_cfg) + task = get_task_type(deploy_cfg) + input_shape = get_input_shape(deploy_cfg) torch_model = init_pytorch_model(codebase, model_cfg, model_checkpoint, device) - data, model_inputs = create_input(codebase, model_cfg, img, device) + data, model_inputs = create_input(codebase, task, model_cfg, img, + input_shape, device) if not isinstance(model_inputs, torch.Tensor): model_inputs = model_inputs[0] diff --git a/mmdeploy/apis/tensorrt/calib_utils.py b/mmdeploy/apis/tensorrt/calib_utils.py index e2726294d..3aa21b9e5 100644 --- a/mmdeploy/apis/tensorrt/calib_utils.py +++ b/mmdeploy/apis/tensorrt/calib_utils.py @@ -55,7 +55,7 @@ def get_batch(self, names, **kwargs): data_torch = torch.from_numpy(data_np) # tile the tensor so we can keep the same distribute - opt_shape = self.opt_shape_dict[name][1] + opt_shape = self.opt_shape_dict[name]['opt_shape'] data_shape = data_torch.shape reps = [ diff --git a/mmdeploy/apis/tensorrt/onnx2tensorrt.py b/mmdeploy/apis/tensorrt/onnx2tensorrt.py index f68cb8293..0227e408c 100644 --- a/mmdeploy/apis/tensorrt/onnx2tensorrt.py +++ b/mmdeploy/apis/tensorrt/onnx2tensorrt.py @@ -5,6 +5,8 @@ import onnx import tensorrt as trt +from mmdeploy.utils import (get_calib_filename, get_common_config, + get_model_inputs, load_config) from .tensorrt_utils import create_trt_engine, save_trt_engine @@ -25,27 +27,19 @@ def onnx2tensorrt(work_dir: str, **kwargs): # load deploy_cfg if necessary - if isinstance(deploy_cfg, str): - deploy_cfg = mmcv.Config.fromfile(deploy_cfg) - elif not isinstance(deploy_cfg, mmcv.Config): - raise TypeError('deploy_cfg must be a filename or Config object, ' - f'but got {type(deploy_cfg)}') + deploy_cfg = load_config(deploy_cfg)[0] mmcv.mkdir_or_exist(osp.abspath(work_dir)) - assert 'tensorrt_params' in deploy_cfg + common_params = get_common_config(deploy_cfg) + model_params = get_model_inputs(deploy_cfg)[model_id] - tensorrt_params = deploy_cfg['tensorrt_params'] - shared_params = tensorrt_params.get('shared_params', dict()) - model_params = tensorrt_params['model_params'][model_id] - - final_params = shared_params + final_params = common_params final_params.update(model_params) int8_param = final_params.get('int8_param', dict()) - if deploy_cfg.get('create_calib', False): - calib_params = deploy_cfg.get('calib_params', dict()) - calib_file = calib_params.get('calib_file', 'calib_file.h5') + calib_file = get_calib_filename(deploy_cfg) + if calib_file is not None: int8_param['calib_file'] = osp.join(work_dir, calib_file) int8_param['model_type'] = partition_type @@ -53,7 +47,7 @@ def onnx2tensorrt(work_dir: str, device_id = parse_device_id(device) engine = create_trt_engine( onnx_model, - opt_shape_dict=final_params['opt_shape_dict'], + opt_shape_dict=final_params['input_shapes'], 
log_level=final_params.get('log_level', trt.Logger.WARNING), fp16_mode=final_params.get('fp16_mode', False), int8_mode=final_params.get('int8_mode', False), diff --git a/mmdeploy/apis/tensorrt/tensorrt_utils.py b/mmdeploy/apis/tensorrt/tensorrt_utils.py index eb7bbc42b..b76ba2ee4 100644 --- a/mmdeploy/apis/tensorrt/tensorrt_utils.py +++ b/mmdeploy/apis/tensorrt/tensorrt_utils.py @@ -34,9 +34,9 @@ def create_trt_engine(onnx_model, Example: >>> engine = create_trt_engine( >>> "onnx_model.onnx", - >>> {'input': [[1, 3, 160, 160], - >>> [1, 3, 320, 320], - >>> [1, 3, 640, 640]]}, + >>> {'input': {"min_shape" : [1, 3, 160, 160], + >>> "opt_shape" :[1, 3, 320, 320], + >>> "max_shape" :[1, 3, 640, 640]}}, >>> log_level=trt.Logger.WARNING, >>> fp16_mode=True, >>> max_workspace_size=1 << 30, @@ -72,9 +72,9 @@ def create_trt_engine(onnx_model, profile = builder.create_optimization_profile() for input_name, param in opt_shape_dict.items(): - min_shape = tuple(param[0][:]) - opt_shape = tuple(param[1][:]) - max_shape = tuple(param[2][:]) + min_shape = param['min_shape'] + opt_shape = param['opt_shape'] + max_shape = param['max_shape'] profile.set_shape(input_name, min_shape, opt_shape, max_shape) config.add_optimization_profile(profile) diff --git a/mmdeploy/apis/utils.py b/mmdeploy/apis/utils.py index 8839769bb..7d2372c58 100644 --- a/mmdeploy/apis/utils.py +++ b/mmdeploy/apis/utils.py @@ -3,7 +3,7 @@ import mmcv import numpy as np -from mmdeploy.utils import Backend, Codebase, get_codebase, load_config +from mmdeploy.utils import Backend, Codebase, Task, get_codebase, load_config def init_pytorch_model(codebase: Codebase, @@ -26,8 +26,7 @@ def init_pytorch_model(codebase: Codebase, model = convert_syncbatchnorm(model) elif codebase == Codebase.MMOCR: - from mmdet.apis import init_detector - from mmocr.models import build_detector # noqa: F401 + from mmocr.apis import init_detector model = init_detector(model_cfg, model_checkpoint, device, cfg_options) elif codebase == Codebase.MMEDIT: @@ -41,8 +40,10 @@ def init_pytorch_model(codebase: Codebase, def create_input(codebase: Codebase, + task: Task, model_cfg: Union[str, mmcv.Config], imgs: Any, + input_shape: Sequence[int] = None, device: str = 'cuda:0', **kwargs): model_cfg = load_config(model_cfg)[0] @@ -50,23 +51,23 @@ def create_input(codebase: Codebase, cfg = model_cfg.copy() if codebase == Codebase.MMCLS: from mmdeploy.mmcls.export import create_input - return create_input(cfg, imgs, device, **kwargs) + return create_input(task, cfg, imgs, input_shape, device, **kwargs) elif codebase == Codebase.MMDET: from mmdeploy.mmdet.export import create_input - return create_input(cfg, imgs, device, **kwargs) + return create_input(task, cfg, imgs, input_shape, device, **kwargs) elif codebase == Codebase.MMOCR: from mmdeploy.mmocr.export import create_input - return create_input(cfg, imgs, device, **kwargs) + return create_input(task, cfg, imgs, input_shape, device, **kwargs) elif codebase == Codebase.MMSEG: from mmdeploy.mmseg.export import create_input - return create_input(cfg, imgs, device, **kwargs) + return create_input(task, cfg, imgs, input_shape, device, **kwargs) elif codebase == Codebase.MMEDIT: from mmdeploy.mmedit.export import create_input - return create_input(cfg, imgs, device, **kwargs) + return create_input(task, cfg, imgs, input_shape, device, **kwargs) else: raise NotImplementedError(f'Unknown codebase type: {codebase.value}') diff --git a/mmdeploy/core/optimizers/function_marker.py b/mmdeploy/core/optimizers/function_marker.py index 
ea91d5948..2942f2dc7 100644 --- a/mmdeploy/core/optimizers/function_marker.py +++ b/mmdeploy/core/optimizers/function_marker.py @@ -3,7 +3,7 @@ import torch from mmdeploy.core.rewriters.function_rewriter import FUNCTION_REWRITER -from mmdeploy.utils import get_codebase +from mmdeploy.utils import cfg_apply_marks, get_codebase, get_partition_config MARK_FUNCTION_COUNT = dict() @@ -53,7 +53,7 @@ def forward(ctx, x, *args): @FUNCTION_REWRITER.register_rewriter( 'mmdeploy.core.optimizers.function_marker.Mark.symbolic') def mark_symbolic(rewriter, g, x, *args): - if rewriter.cfg.get('apply_marks', False): + if cfg_apply_marks(rewriter.cfg): return rewriter.origin_func(g, x, *args) return x @@ -64,13 +64,13 @@ def forward_of_mark(rewriter, ctx, x, dtype, shape, func, func_id, type, name, id, attrs): deploy_cfg = rewriter.cfg # save calib data - apply_marks = deploy_cfg.get('apply_marks', False) + apply_marks = cfg_apply_marks(deploy_cfg) create_calib = getattr(rewriter, 'create_calib', False) if apply_marks and create_calib: codebase = get_codebase(deploy_cfg) - assert 'partition_params' in deploy_cfg - partition_params = deploy_cfg['partition_params'] - partition_type = partition_params['partition_type'] + partition_params = get_partition_config(deploy_cfg) + assert partition_params is not None, 'No partition config.' + partition_type = partition_params['type'] from mmdeploy.apis.utils import get_partition_cfg partition_cfgs = get_partition_cfg(codebase, partition_type) assert hasattr(rewriter, 'calib_file') diff --git a/mmdeploy/mmcls/export/prepare_input.py b/mmdeploy/mmcls/export/prepare_input.py index f17467676..5f3600c28 100644 --- a/mmdeploy/mmcls/export/prepare_input.py +++ b/mmdeploy/mmcls/export/prepare_input.py @@ -1,4 +1,5 @@ -from typing import Any, Optional, Union +import logging +from typing import Any, Optional, Sequence, Union import mmcv from mmcls.datasets import build_dataloader as build_dataloader_mmcls @@ -6,12 +7,15 @@ from mmcls.datasets.pipelines import Compose from mmcv.parallel import collate, scatter -from mmdeploy.utils.config_utils import load_config +from mmdeploy.utils import Task, load_config -def create_input(model_cfg: Union[str, mmcv.Config], +def create_input(task: Task, + model_cfg: Union[str, mmcv.Config], imgs: Any, + input_shape: Sequence[int] = None, device: str = 'cuda:0'): + assert task == Task.CLASSIFICATION cfg = load_config(model_cfg)[0].copy() if isinstance(imgs, str): if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile': @@ -21,6 +25,14 @@ def create_input(model_cfg: Union[str, mmcv.Config], if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile': cfg.data.test.pipeline.pop(0) data = dict(img=imgs) + # for static exporting + if input_shape is not None: + if 'crop_size' in cfg.data.test.pipeline[2]: + crop_size = cfg.data.test.pipeline[2]['crop_size'] + if tuple(input_shape) != (crop_size, crop_size): + logging.warning( + f'`input shape` should be equal to `crop_size`: {crop_size},\ + but given: {input_shape}') test_pipeline = Compose(cfg.data.test.pipeline) data = test_pipeline(data) data = collate([data], samples_per_gpu=1) diff --git a/mmdeploy/mmdet/apis/inference.py b/mmdeploy/mmdet/apis/inference.py index 3ac48222b..f0dfbba81 100644 --- a/mmdeploy/mmdet/apis/inference.py +++ b/mmdeploy/mmdet/apis/inference.py @@ -9,7 +9,8 @@ from mmdet.models import BaseDetector from mmdeploy.mmdet.core.post_processing import multiclass_nms -from mmdeploy.utils.config_utils import Backend, get_backend, load_config +from mmdeploy.utils import 
(Backend, get_backend, get_mmdet_params, + get_partition_config, load_config) class DeployBaseDetector(BaseDetector): @@ -176,7 +177,7 @@ def partition0_postprocess(self, scores, bboxes): cfg = self.model_cfg.model.test_cfg deploy_cfg = self.deploy_cfg - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) @@ -271,7 +272,7 @@ def partition0_postprocess(self, x, scores, bboxes): cfg = self.model_cfg.model.test_cfg.rpn deploy_cfg = self.deploy_cfg - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) pre_top_k = post_params.pre_top_k @@ -499,14 +500,16 @@ def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs): ONNXRUNTIME_DETECTOR_MAP = dict( end2end=ONNXRuntimeDetector, - single_stage_base=ONNXRuntimePSSDetector, - two_stage_base=ONNXRuntimePTSDetector) + single_stage=ONNXRuntimePSSDetector, + two_stage=ONNXRuntimePTSDetector) + TENSORRT_DETECTOR_MAP = dict( - end2end=TensorRTDetector, two_stage_base=TensorRTPTSDetector) + end2end=TensorRTDetector, two_stage=TensorRTPTSDetector) PPL_DETECTOR_MAP = dict(end2end=PPLDetector) + NCNN_DETECTOR_MAP = dict( - single_stage_base=NCNNPSSDetector, two_stage_base=NCNNPTSDetector) + single_stage=NCNNPSSDetector, two_stage=NCNNPTSDetector) BACKEND_DETECTOR_MAP = { Backend.ONNXRUNTIME: ONNXRUNTIME_DETECTOR_MAP, @@ -518,8 +521,7 @@ def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs): def build_detector(model_files, model_cfg, deploy_cfg, device_id, **kwargs): # load cfg if necessary - deploy_cfg = load_config(deploy_cfg)[0] - model_cfg = load_config(model_cfg)[0] + deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) backend = get_backend(deploy_cfg) class_names = get_classes_from_config(model_cfg) @@ -529,9 +531,9 @@ def build_detector(model_files, model_cfg, deploy_cfg, device_id, **kwargs): detector_map = BACKEND_DETECTOR_MAP[backend] partition_type = 'end2end' - if deploy_cfg.get('apply_marks', False): - partition_params = deploy_cfg.get('partition_params', dict()) - partition_type = partition_params.get('partition_type', None) + partition_config = get_partition_config(deploy_cfg) + if partition_config is not None: + partition_type = partition_config.get('type', None) assert partition_type in detector_map,\ f'Unsupported partition type: {partition_type}' diff --git a/mmdeploy/mmdet/export/model_partition.py b/mmdeploy/mmdet/export/model_partition.py index d6db3562c..f3a261374 100644 --- a/mmdeploy/mmdet/export/model_partition.py +++ b/mmdeploy/mmdet/export/model_partition.py @@ -1,5 +1,5 @@ MMDET_PARTITION_CFG = dict( - single_stage_base=[ + single_stage=[ dict( save_file='partition0.onnx', start='detector_forward:input', @@ -21,7 +21,7 @@ }, ) ], - two_stage_base=[ + two_stage=[ dict( save_file='partition0.onnx', start='detector_forward:input', diff --git a/mmdeploy/mmdet/export/prepare_input.py b/mmdeploy/mmdet/export/prepare_input.py index 56e95bd3b..aaf26bd87 100644 --- a/mmdeploy/mmdet/export/prepare_input.py +++ b/mmdeploy/mmdet/export/prepare_input.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Union +from typing import Any, Optional, Sequence, Union import mmcv import numpy as np @@ 
-8,12 +8,15 @@ from mmdet.datasets import replace_ImageToTensor from mmdet.datasets.pipelines import Compose -from mmdeploy.utils.config_utils import load_config +from mmdeploy.utils import Task, load_config -def create_input(model_cfg: Union[str, mmcv.Config], +def create_input(task: Task, + model_cfg: Union[str, mmcv.Config], imgs: Any, + input_shape: Sequence[int] = None, device: str = 'cuda:0'): + assert task == Task.OBJECT_DETECTION cfg = load_config(model_cfg)[0].copy() if not isinstance(imgs, (list, tuple)): @@ -23,6 +26,16 @@ def create_input(model_cfg: Union[str, mmcv.Config], cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' + # for static exporting + if input_shape is not None: + cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape) + transforms = cfg.data.test.pipeline[1]['transforms'] + for trans in transforms: + trans_type = trans['type'] + if trans_type == 'Resize': + trans['keep_ratio'] = False + elif trans_type == 'Pad': + trans['size_divisor'] = 1 cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) diff --git a/mmdeploy/mmdet/models/dense_heads/anchor_head.py b/mmdeploy/mmdet/models/dense_heads/anchor_head.py index 7507a3f20..99056f35f 100644 --- a/mmdeploy/mmdet/models/dense_heads/anchor_head.py +++ b/mmdeploy/mmdet/models/dense_heads/anchor_head.py @@ -3,8 +3,8 @@ from mmdeploy.core import FUNCTION_REWRITER from mmdeploy.mmdet.core import multiclass_nms from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import is_dynamic_shape -from mmdeploy.utils.config_utils import Backend, get_backend +from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, + is_dynamic_shape) @FUNCTION_REWRITER.register_rewriter( @@ -98,7 +98,7 @@ def get_bboxes_of_anchor_head(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) @@ -195,7 +195,7 @@ def get_bboxes_of_anchor_head_ncnn(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/fcos_head.py b/mmdeploy/mmdet/models/dense_heads/fcos_head.py index 9f9014c20..b51825093 100644 --- a/mmdeploy/mmdet/models/dense_heads/fcos_head.py +++ b/mmdeploy/mmdet/models/dense_heads/fcos_head.py @@ -3,8 +3,8 @@ from mmdeploy.core import FUNCTION_REWRITER from mmdeploy.mmdet.core import distance2bbox, multiclass_nms from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import is_dynamic_shape -from mmdeploy.utils.config_utils import Backend, get_backend +from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, + is_dynamic_shape) @FUNCTION_REWRITER.register_rewriter( @@ -97,7 +97,7 @@ def get_bboxes_of_fcos_head(ctx, return batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_centerness - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) 
max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) @@ -189,7 +189,7 @@ def get_bboxes_of_fcos_head_ncnn(ctx, batch_mlvl_scores = (_batch_mlvl_scores * _batch_mlvl_centerness). \ reshape(batch_mlvl_scores.shape) batch_mlvl_bboxes = batch_mlvl_bboxes.reshape(batch_size, -1, 4) - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/rpn_head.py b/mmdeploy/mmdet/models/dense_heads/rpn_head.py index 3011d0867..8cd065169 100644 --- a/mmdeploy/mmdet/models/dense_heads/rpn_head.py +++ b/mmdeploy/mmdet/models/dense_heads/rpn_head.py @@ -3,8 +3,8 @@ from mmdeploy.core import FUNCTION_REWRITER from mmdeploy.mmdet.core import multiclass_nms from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import is_dynamic_shape -from mmdeploy.utils.config_utils import Backend, get_backend +from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, + is_dynamic_shape) @FUNCTION_REWRITER.register_rewriter('mmdet.models.RPNHead.get_bboxes') @@ -93,7 +93,7 @@ def get_bboxes_of_rpn_head(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) pre_top_k = post_params.pre_top_k @@ -185,7 +185,7 @@ def get_bboxes_of_rpn_head_ncnn(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = deploy_cfg.post_processing + post_params = get_mmdet_params(deploy_cfg) iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) pre_top_k = post_params.pre_top_k diff --git a/mmdeploy/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/mmdeploy/mmdet/models/roi_heads/bbox_heads/bbox_head.py index c797e1632..cba1e7ddf 100644 --- a/mmdeploy/mmdet/models/roi_heads/bbox_heads/bbox_head.py +++ b/mmdeploy/mmdet/models/roi_heads/bbox_heads/bbox_head.py @@ -3,6 +3,7 @@ from mmdeploy.core import FUNCTION_REWRITER, mark from mmdeploy.mmdet.core import multiclass_nms +from mmdeploy.utils import get_mmdet_params @FUNCTION_REWRITER.register_rewriter( @@ -56,7 +57,7 @@ def get_bboxes_of_bbox_head(ctx, self, rois, cls_score, bbox_pred, img_shape, bboxes = bboxes[dim0_inds, max_inds].reshape(batch_size, -1, 4) # get nms params - post_params = ctx.cfg.post_processing + post_params = get_mmdet_params(ctx.cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/mmdeploy/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py index aba535e01..cd3c12cff 100644 --- a/mmdeploy/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py +++ b/mmdeploy/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py @@ -2,7 +2,7 @@ import torch.nn.functional as F from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.utils.config_utils import Backend, get_backend 
+from mmdeploy.utils import Backend, get_backend @FUNCTION_REWRITER.register_rewriter( diff --git a/mmdeploy/mmedit/export/prepare_input.py b/mmdeploy/mmedit/export/prepare_input.py index 972991833..726690f86 100644 --- a/mmdeploy/mmedit/export/prepare_input.py +++ b/mmdeploy/mmedit/export/prepare_input.py @@ -1,4 +1,4 @@ -from typing import Any, Union +from typing import Any, Sequence, Union import mmcv import numpy as np @@ -7,7 +7,7 @@ from mmedit.datasets import build_dataset as build_dataset_mmedit from mmedit.datasets.pipelines import Compose -from mmdeploy.utils.config_utils import Task, load_config +from mmdeploy.utils import Task, load_config def _preprocess_cfg(config): @@ -27,10 +27,11 @@ def _preprocess_cfg(config): pipeline['meta_keys'].remove(key) -def create_input(model_cfg: Union[str, mmcv.Config], +def create_input(task: Task, + model_cfg: Union[str, mmcv.Config], imgs: Any, - device: str = 'cuda:0', - task: Task = Task.SUPER_RESOLUTION): + input_shape: Sequence[int] = None, + device: str = 'cuda:0'): if isinstance(imgs, (list, tuple)): if not isinstance(imgs[0], (np.ndarray, str)): raise AssertionError('imgs must be strings or numpy arrays') @@ -45,7 +46,19 @@ def create_input(model_cfg: Union[str, mmcv.Config], if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type - cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' + cfg.test_pipeline[0].type = 'LoadImageFromWebcam' + + # for static exporting + if input_shape is not None: + if task == Task.SUPER_RESOLUTION: + resize = { + 'type': 'Resize', + 'scale': (input_shape[0], input_shape[1]), + 'keys': ['lq'] + } + cfg.test_pipeline.insert(1, resize) + else: + raise NotImplementedError(f'Unknown task type: {task.value}') test_pipeline = Compose(cfg.test_pipeline) diff --git a/mmdeploy/mmocr/export/prepare_input.py b/mmdeploy/mmocr/export/prepare_input.py index e593c3c5e..115dda471 100644 --- a/mmdeploy/mmocr/export/prepare_input.py +++ b/mmdeploy/mmocr/export/prepare_input.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Union +from typing import Any, Optional, Sequence, Union import mmcv import numpy as np @@ -7,11 +7,13 @@ from mmocr.datasets import build_dataloader as build_dataloader_mmocr from mmocr.datasets import build_dataset as build_dataset_mmocr -from mmdeploy.utils.config_utils import load_config +from mmdeploy.utils import Task, load_config -def create_input(model_cfg: Union[str, mmcv.Config], +def create_input(task: Task, + model_cfg: Union[str, mmcv.Config], imgs: Any, + input_shape: Sequence[int] = None, device: str = 'cuda:0'): if isinstance(imgs, (list, tuple)): if not isinstance(imgs[0], (np.ndarray, str)): @@ -35,6 +37,21 @@ def create_input(model_cfg: Union[str, mmcv.Config], model_cfg.data.test.pipeline = replace_ImageToTensor( model_cfg.data.test.pipeline) + # for static exporting + if input_shape is not None: + if task == Task.TEXT_DETECTION: + model_cfg.data.test.pipeline[1].img_scale = tuple(input_shape) + model_cfg.data.test.pipeline[1].transforms[0].keep_ratio = False + model_cfg.data.test.pipeline[1].transforms[0].img_scale = tuple( + input_shape) + elif task == Task.TEXT_RECOGNITION: + resize = { + 'height': input_shape[0], + 'min_width': input_shape[1], + 'max_width': input_shape[1], + 'keep_aspect_ratio': False + } + model_cfg.data.test.pipeline[1].update(resize) from mmdet.datasets.pipelines import Compose from mmocr.datasets import build_dataset # noqa: F401 test_pipeline = Compose(model_cfg.data.test.pipeline) diff --git 
a/mmdeploy/mmseg/export/prepare_input.py b/mmdeploy/mmseg/export/prepare_input.py index 0fbf0f3ea..3eebacdad 100644 --- a/mmdeploy/mmseg/export/prepare_input.py +++ b/mmdeploy/mmseg/export/prepare_input.py @@ -1,4 +1,4 @@ -from typing import Any, Union +from typing import Any, Sequence, Union import mmcv import numpy as np @@ -8,13 +8,16 @@ from mmseg.datasets import build_dataset as build_dataset_mmseg from mmseg.datasets.pipelines import Compose -from mmdeploy.utils.config_utils import load_config +from mmdeploy.utils import Task, load_config -def create_input(model_cfg: Union[str, mmcv.Config], +def create_input(task: Task, + model_cfg: Union[str, mmcv.Config], imgs: Any, + input_shape: Sequence[int] = None, device: str = 'cuda:0'): + assert task == Task.SEGMENTATION cfg = load_config(model_cfg)[0].copy() if not isinstance(imgs, (list, tuple)): imgs = [imgs] @@ -23,8 +26,9 @@ def create_input(model_cfg: Union[str, mmcv.Config], cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' - # TODO remove hard code - cfg.data.test.pipeline[1]['img_scale'] = (1024, 512) + # for static exporting + if input_shape is not None: + cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape) cfg.data.test.pipeline[1]['transforms'][0]['keep_ratio'] = False cfg.data.test.pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] diff --git a/mmdeploy/utils/__init__.py b/mmdeploy/utils/__init__.py index 9b025eec1..ec36ead2f 100644 --- a/mmdeploy/utils/__init__.py +++ b/mmdeploy/utils/__init__.py @@ -1,8 +1,15 @@ -from .config_utils import (get_backend, get_codebase, get_task_type, - is_dynamic_batch, is_dynamic_shape, load_config) +from .config_utils import (cfg_apply_marks, get_backend, get_calib_config, + get_calib_filename, get_codebase, get_common_config, + get_input_shape, get_mmdet_params, get_model_inputs, + get_onnx_config, get_partition_config, + get_task_type, is_dynamic_batch, is_dynamic_shape, + load_config) from .constants import Backend, Codebase, Task __all__ = [ 'is_dynamic_batch', 'is_dynamic_shape', 'get_task_type', 'get_codebase', - 'get_backend', 'load_config', 'Backend', 'Codebase', 'Task' + 'get_backend', 'load_config', 'Backend', 'Codebase', 'Task', + 'get_onnx_config', 'get_partition_config', 'get_calib_config', + 'get_calib_filename', 'get_common_config', 'get_model_inputs', + 'cfg_apply_marks', 'get_mmdet_params', 'get_input_shape' ] diff --git a/mmdeploy/utils/config_utils.py b/mmdeploy/utils/config_utils.py index cf6ec73c6..911c216a6 100644 --- a/mmdeploy/utils/config_utils.py +++ b/mmdeploy/utils/config_utils.py @@ -6,7 +6,14 @@ def load_config(*args): - """Load the configuration and check the validity.""" + """Load the configuration and check the validity. + + Args: + args (str | list[str]): The path(s) to the config file(s). + + Returns: + list[mmcv.Config]: The loaded config(s). + """ def _load_config(cfg): if isinstance(cfg, str): @@ -23,40 +30,97 @@ def _load_config(cfg): def get_task_type(deploy_cfg: Union[str, mmcv.Config], default=None) -> Task: - """Get the task type of the algorithm.""" + """Get the task type of the algorithm. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + default (str): If the "task" field of config is empty, then return + default task type. + + Returns: + Task: An enumeration that denotes the task type. 
+ """ deploy_cfg = load_config(deploy_cfg)[0] - task_pairs = {i.value: i for i in Task} - task = task_pairs.get(deploy_cfg.get('task', default), default) + try: + task = deploy_cfg['codebase_config']['task'] + except KeyError: + return default + task = Task.get(task, default) return task def get_codebase(deploy_cfg: Union[str, mmcv.Config], default=None) -> Codebase: - """Get the codebase of the config.""" + """Get the codebase from the config. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + default (str): If the "codebase" field of config is empty, then return + default codebase type. + + Returns: + Codebase: An enumeration that denotes the codebase type. + """ deploy_cfg = load_config(deploy_cfg)[0] - codebase_pairs = {i.value: i for i in Codebase} - codebase = codebase_pairs.get(deploy_cfg.get('codebase', default), default) + try: + codebase = deploy_cfg['codebase_config']['type'] + except KeyError: + return default + codebase = Codebase.get(codebase, default) return codebase def get_backend(deploy_cfg: Union[str, mmcv.Config], default=None) -> Backend: - """Get the backend of the config.""" + """Get the backend from the config. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + default (str): If the "backend" field of config is empty, then return + default backend type. + + Returns: + Backend: An enumeration that denotes the backend type. + """ deploy_cfg = load_config(deploy_cfg)[0] - backend_pairs = {i.value: i for i in Backend} - backend = backend_pairs.get(deploy_cfg.get('backend', default), default) + try: + backend = deploy_cfg['backend_config']['type'] + except KeyError: + return default + backend = Backend.get(backend, default) return backend +def get_onnx_config(deploy_cfg: Union[str, mmcv.Config]) -> dict: + """Get the onnx export parameters from the config. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + + Returns: + dict: The config dictionary of onnx parameters. + """ + + return deploy_cfg['onnx_config'] + + def is_dynamic_batch(deploy_cfg: Union[str, mmcv.Config], - input_name: str = 'input'): - """Check if input batch is dynamic.""" + input_name: str = 'input') -> bool: + """Check if input batch is dynamic. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + input_name (str): The name of input in onnx export parameter. + + Returns: + bool: Whether the config sets a dynamic batch (axis 0). + """ deploy_cfg = load_config(deploy_cfg)[0] # check if dynamic axes exist - dynamic_axes = deploy_cfg['pytorch2onnx'].get('dynamic_axes', None) + dynamic_axes = get_onnx_config(deploy_cfg).get('dynamic_axes', None) if dynamic_axes is None: return False @@ -74,11 +138,19 @@ def is_dynamic_batch(deploy_cfg: Union[str, mmcv.Config], def is_dynamic_shape(deploy_cfg: Union[str, mmcv.Config], input_name: str = 'input'): - """Check if input shape is dynamic.""" + """Check if input shape is dynamic. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + input_name (str): The name of input in onnx export parameter. + + Returns: + bool: Whether the config sets a dynamic shape (axes 2 and 3). 
+ """ deploy_cfg = load_config(deploy_cfg)[0] # check if dynamic axes exist - dynamic_axes = deploy_cfg['pytorch2onnx'].get('dynamic_axes', None) + dynamic_axes = get_onnx_config(deploy_cfg).get('dynamic_axes', None) if dynamic_axes is None: return False @@ -92,3 +164,115 @@ def is_dynamic_shape(deploy_cfg: Union[str, mmcv.Config], return True return False + + +def get_input_shape(deploy_cfg: Union[str, mmcv.Config]): + """Get the input shape for static exporting. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + + Returns: + list: The input shape for backend model (axis 2 and 3), e.g [512, 512]. + """ + input_shape = get_onnx_config(deploy_cfg)['input_shape'] + if input_shape is not None: + assert len(input_shape) == 2, 'length of input_shape should equal to 2' + return input_shape + + +def cfg_apply_marks(deploy_cfg: Union[str, mmcv.Config]): + """Check if the model needs to be partitioned by checking if the config + contains 'apply_marks'. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + + Returns: + bool: Whether config contains 'apply_marks'. + """ + partition_config = deploy_cfg.get('partition_config', None) + if partition_config is None: + return None + + apply_marks = partition_config.get('apply_marks', False) + return apply_marks + + +def get_partition_config(deploy_cfg: Union[str, mmcv.Config]): + """Check if the model needs to be partitioned and get the config of + partition. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + + Returns: + dict: The config of partition + """ + partition_config = deploy_cfg.get('partition_config', None) + if partition_config is None: + return None + + apply_marks = partition_config.get('apply_marks', False) + if not apply_marks: + return None + + return partition_config + + +def get_calib_config(deploy_cfg: Union[str, mmcv.Config]): + """Check if the model has calibration configs. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + + Returns: + dict: The config of calibration + """ + + calib_config = deploy_cfg.get('calib_config', None) + return calib_config + + +def get_calib_filename(deploy_cfg: Union[str, mmcv.Config]): + """Check if the model needs to create calib and get output filename of + calib. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. 
+ + Returns: + str: The filename of output calib file + """ + + calib_config = get_calib_config(deploy_cfg) + if calib_config is None: + return None + create_calib = calib_config.get('create_calib', False) + if create_calib: + calib_filename = calib_config.get('calib_file', 'calib_file.h5') + return calib_filename + else: + return None + + +def get_common_config(deploy_cfg: Union[str, mmcv.Config]): + backend_config = deploy_cfg['backend_config'] + model_params = backend_config.get('common_config', dict()) + return model_params + + +def get_model_inputs(deploy_cfg: Union[str, mmcv.Config]): + backend_config = deploy_cfg['backend_config'] + model_params = backend_config.get('model_inputs', []) + return model_params + + +def get_mmdet_params(deploy_cfg: Union[str, mmcv.Config]): + deploy_cfg = load_config(deploy_cfg)[0] + codebase_key = 'codebase_config' + assert codebase_key in deploy_cfg + codebase_config = deploy_cfg[codebase_key] + post_params = codebase_config.get('post_processing', None) + assert post_params is not None + return post_params diff --git a/mmdeploy/utils/constants.py b/mmdeploy/utils/constants.py index f178c0e02..1b37ad1e4 100644 --- a/mmdeploy/utils/constants.py +++ b/mmdeploy/utils/constants.py @@ -1,13 +1,26 @@ from enum import Enum -class Task(Enum): +class AdvancedEnum(Enum): + + @classmethod + def get(cls, str, a): + for k in cls: + if k.value == str: + return k + return a + + +class Task(AdvancedEnum): TEXT_DETECTION = 'TextDetection' TEXT_RECOGNITION = 'TextRecognition' + SEGMENTATION = 'Segmentation' SUPER_RESOLUTION = 'SuperResolution' + CLASSIFICATION = 'Classification' + OBJECT_DETECTION = 'ObjectDetection' -class Codebase(Enum): +class Codebase(AdvancedEnum): MMDET = 'mmdet' MMSEG = 'mmseg' MMCLS = 'mmcls' @@ -15,7 +28,7 @@ class Codebase(Enum): MMEDIT = 'mmedit' -class Backend(Enum): +class Backend(AdvancedEnum): PYTORCH = 'pytorch' TENSORRT = 'tensorrt' ONNXRUNTIME = 'onnxruntime' diff --git a/mmdeploy/utils/export_info.py b/mmdeploy/utils/export_info.py index e5f24fd68..0d3e054d3 100644 --- a/mmdeploy/utils/export_info.py +++ b/mmdeploy/utils/export_info.py @@ -31,6 +31,10 @@ def dump_info(deploy_cfg: Union[str, mmcv.Config], sort_keys=False, indent=4) + if 'trt' in deploy_cfg: + deploy_cfg._cfg_dict.pop('trt') + deploy_cfg.backend_config.common_config.log_level = str( + deploy_cfg.backend_config.common_config.log_level) mmcv.dump( deploy_cfg._cfg_dict, '{}/deploy_cfg.json'.format(work_dir), diff --git a/mmdeploy/version.py b/mmdeploy/version.py new file mode 100644 index 000000000..b8b551516 --- /dev/null +++ b/mmdeploy/version.py @@ -0,0 +1,19 @@ +# Copyright (c) Open-MMLab. All rights reserved. 
+ +__version__ = '0.1.0' +short_version = __version__ + + +def parse_version_info(version_str): + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..6981bd723 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +-r requirements/build.txt +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/requirements/build.txt b/requirements/build.txt new file mode 100644 index 000000000..22632d283 --- /dev/null +++ b/requirements/build.txt @@ -0,0 +1,4 @@ +cython +numpy +packaging +setuptools diff --git a/requirements/optional.txt b/requirements/optional.txt new file mode 100644 index 000000000..46c97d4af --- /dev/null +++ b/requirements/optional.txt @@ -0,0 +1,5 @@ +mmocr>=0.3.0 +ncnn +onnxruntime>=1.8.0 +pyppl +tensorrt diff --git a/requirements/runtime.txt b/requirements/runtime.txt new file mode 100644 index 000000000..6114dfc58 --- /dev/null +++ b/requirements/runtime.txt @@ -0,0 +1,6 @@ +h5py +matplotlib +numpy +onnx>=1.8.0 +six +terminaltables diff --git a/requirements/tests.txt b/requirements/tests.txt new file mode 100644 index 000000000..fd4c0f26c --- /dev/null +++ b/requirements/tests.txt @@ -0,0 +1,3 @@ +flake8 +pytest +yapf diff --git a/setup.py b/setup.py index 2068abdc8..be72804d0 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,139 @@ -from setuptools import setup +import logging + +from setuptools import find_packages, setup + +version_file = 'mmdeploy/version.py' + +try: + from torch.utils.cpp_extension import BuildExtension + cmd_class = {'build_ext': BuildExtension} +except ModuleNotFoundError: + cmd_class = {} + logging.warning('Skip building ext ops due to the absence of torch.') + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a file but strips specific + versioning information. 
+ + Args: + fname (str): path to the file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import sys + from os.path import exists + import re + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + if __name__ == '__main__': - setup(name='mmdeploy', version=0.1) + setup( + name='mmdeploy', + version=get_version(), + description='OpenMMLab Model Deployment', + long_description=readme(), + long_description_content_type='text/markdown', + author='OpenMMLab', + author_email='openmmlab@gmail.com', + keywords='computer vision, model deployment', + url='/~https://github.com/open-mmlab/mmdeploy', + packages=find_packages(exclude=('configs', 'tools')), + include_package_data=True, + classifiers=[ + 'Development Status :: 3 - Alpha', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + ], + license='Apache License 2.0', + setup_requires=parse_requirements('requirements/build.txt'), + tests_require=parse_requirements('requirements/tests.txt'), + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'build': parse_requirements('requirements/build.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + }, + ext_modules=[], + cmdclass=cmd_class, + zip_safe=False) diff --git a/tools/deploy.py b/tools/deploy.py index c0af9ae56..7a1095669 100644 --- a/tools/deploy.py +++ b/tools/deploy.py @@ -10,9 +10,10 @@ from mmdeploy.apis import 
(create_calib_table, extract_model, inference_model, torch2onnx) -from mmdeploy.apis.utils import get_partition_cfg -from mmdeploy.utils.config_utils import (Backend, get_backend, get_codebase, - load_config) +from mmdeploy.apis.utils import get_partition_cfg as parse_partition_cfg +from mmdeploy.utils import (Backend, get_backend, get_calib_filename, + get_codebase, get_model_inputs, get_onnx_config, + get_partition_config, load_config) from mmdeploy.utils.export_info import dump_info @@ -93,7 +94,7 @@ def main(): deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path) if args.dump_info: - dump_info(deploy_cfg, model_cfg, args.work_dir, args.img, args.device) + dump_info(deploy_cfg, model_cfg, args.work_dir) # create work_dir if not mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) @@ -101,7 +102,7 @@ def main(): ret_value = mp.Value('d', 0, lock=False) # convert onnx - onnx_save_file = deploy_cfg['pytorch2onnx']['save_file'] + onnx_save_file = get_onnx_config(deploy_cfg)['save_file'] create_process( 'torch2onnx', target=torch2onnx, @@ -114,17 +115,16 @@ def main(): onnx_files = [osp.join(args.work_dir, onnx_save_file)] # partition model - apply_marks = deploy_cfg.get('apply_marks', False) - if apply_marks: - assert hasattr(deploy_cfg, 'partition_params') - partition_params = deploy_cfg['partition_params'] + partition_cfgs = get_partition_config(deploy_cfg) - if 'partition_cfg' in partition_params: - partition_cfgs = partition_params.get('partition_cfg', None) + if partition_cfgs is not None: + + if 'partition_cfg' in partition_cfgs: + partition_cfgs = partition_cfgs.get('partition_cfg', None) else: - assert 'partition_type' in partition_params - partition_cfgs = get_partition_cfg( - get_codebase(deploy_cfg), partition_params['partition_type']) + assert 'type' in partition_cfgs + partition_cfgs = parse_partition_cfg( + get_codebase(deploy_cfg), partition_cfgs['type']) origin_onnx_file = onnx_files[0] onnx_files = [] @@ -145,16 +145,14 @@ def main(): onnx_files.append(save_path) # calib data - create_calib = deploy_cfg.get('create_calib', False) - if create_calib: - calib_params = deploy_cfg.get('calib_params', dict()) - calib_file = calib_params.get('calib_file', 'calib_file.h5') - calib_file = osp.join(args.work_dir, calib_file) + calib_filename = get_calib_filename(deploy_cfg) + if calib_filename is not None: + calib_path = osp.join(args.work_dir, calib_filename) create_process( 'calibration', create_calib_table, - args=(calib_file, deploy_cfg_path, model_cfg_path, + args=(calib_path, deploy_cfg_path, model_cfg_path, checkpoint_path), kwargs=dict( dataset_cfg=args.calib_dataset_cfg, @@ -166,9 +164,7 @@ def main(): # convert backend backend = get_backend(deploy_cfg, 'default') if backend == Backend.TENSORRT: - assert hasattr(deploy_cfg, 'tensorrt_params') - tensorrt_params = deploy_cfg['tensorrt_params'] - model_params = tensorrt_params.get('model_params', []) + model_params = get_model_inputs(deploy_cfg) assert len(model_params) == len(onnx_files) from mmdeploy.apis.tensorrt import is_available as trt_is_available @@ -182,7 +178,8 @@ def main(): onnx_name = osp.splitext(osp.split(onnx_path)[1])[0] save_file = model_param.get('save_file', onnx_name + '.engine') - partition_type = 'end2end' if not apply_marks else onnx_name + partition_type = 'end2end' if partition_cfgs is None \ + else onnx_name create_process( f'onnx2tensorrt of {onnx_path}', target=onnx2tensorrt,
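For reference, the helpers introduced in mmdeploy/utils/config_utils.py expect a deploy config organized into the new top-level sections. The sketch below is assembled purely from the keys those get_* helpers read; the concrete values mirror the static segmentation TensorRT config added in this patch and are otherwise illustrative placeholders, not part of the diff.

# Illustrative deploy config, assuming the layout read by the new get_* helpers.
onnx_config = dict(save_file='end2end.onnx', input_shape=[1024, 512])
codebase_config = dict(type='mmseg', task='Segmentation')
backend_config = dict(
    type='tensorrt',
    common_config=dict(fp16_mode=False, max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 512, 1024],
                    opt_shape=[1, 3, 512, 1024],
                    max_shape=[1, 3, 512, 1024])))
    ])
# Optional sections, only consulted when present:
# calib_config = dict(create_calib=True, calib_file='calib_data.h5')
# partition_config = dict(apply_marks=True, type='two_stage')  # mmdet partitioning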
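A minimal usage sketch of the refactored utilities against such a config; the file name is a placeholder, and the comments show the values the calls would resolve to for the sketch above.

from mmdeploy.utils import (get_backend, get_calib_filename, get_codebase,
                            get_input_shape, get_model_inputs, get_task_type,
                            load_config)

# 'segmentation_tensorrt_static.py' is a hypothetical path to a config like the sketch above.
deploy_cfg = load_config('segmentation_tensorrt_static.py')[0]  # load_config returns a list

backend = get_backend(deploy_cfg)            # Backend.TENSORRT
codebase = get_codebase(deploy_cfg)          # Codebase.MMSEG
task = get_task_type(deploy_cfg)             # Task.SEGMENTATION
input_shape = get_input_shape(deploy_cfg)    # [1024, 512]; None means dynamic export
model_inputs = get_model_inputs(deploy_cfg)  # per-engine TensorRT 'input_shapes' entries
calib_file = get_calib_filename(deploy_cfg)  # None unless calib_config sets create_calib=True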
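The fallback behavior of those helpers comes from the new AdvancedEnum.get classmethod in mmdeploy/utils/constants.py, which looks a raw config string up by enum value and returns the supplied default when nothing matches. A small sketch of that behavior:

from mmdeploy.utils import Backend, Codebase

assert Backend.get('tensorrt', None) is Backend.TENSORRT   # matched by value
assert Backend.get('not-a-backend', None) is None           # falls back to the default
assert Codebase.get('mmseg', Codebase.MMDET) is Codebase.MMSEG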
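Finally, the new mmdeploy/version.py mirrors the version helpers used across OpenMMLab projects: parse_version_info splits the version string into a tuple and keeps any release-candidate suffix as a string. The version strings below are only examples of the parsing rule.

from mmdeploy.version import parse_version_info

assert parse_version_info('0.1.0') == (0, 1, 0)
assert parse_version_info('0.2.0rc1') == (0, 2, 0, 'rc1')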