NNAdapter support Intel OpenVINO
csy0225 committed Mar 1, 2022
1 parent a25fdaf commit da793e4
Showing 21 changed files with 345 additions and 255 deletions.
========================== changed file ==========================
@@ -60,11 +60,6 @@ class Program {
               uint32_t output_count,
               core::Argument* output_arguments);
 
-  void Init() {
-    static InferenceEngine::Core
-  }
-
-
  private:
   void Clear();
   int CheckInputsAndOutputs(uint32_t input_count,
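Note: the Init() stub removed above had only begun to declare a legacy InferenceEngine::Core object. With the OpenVINO 2.0 API the runtime entry point is ov::Core instead. As a minimal sketch (not the driver's actual code; the function name and the "CPU" device string are illustrative assumptions), compiling and running an already-built ov::Model looks like this:

    #include <openvino/openvino.hpp>

    // Compile an ov::Model for a target device and run a single inference.
    void RunOnce(const std::shared_ptr<ov::Model>& model) {
      ov::Core core;  // OpenVINO 2.0 replacement for InferenceEngine::Core
      ov::CompiledModel compiled = core.compile_model(model, "CPU");
      ov::InferRequest request = compiled.create_infer_request();
      // Real code would fill inputs via request.set_input_tensor(...) first.
      request.infer();
    }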
========================== changed file ==========================
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,4 +24,4 @@ set(DEPS ${NNADAPTER_OPERATIONS} ${NNADAPTER_UTILITIES} ${${DEVICE_NAME}_deps})
 
 add_library(${DEVICE_NAME} SHARED ${SRCS})
 target_link_libraries(${DEVICE_NAME} "-Wl,--start-group" ${DEPS} "-Wl,--end-group")
-set(NNADAPTER_DEVICES ${NNADAPTER_DEVICES} ${DEVICE_NAME} CACHE INTERNAL "")
+set(NNADAPTER_DEVICES ${NNADAPTER_DEVICES} ${DEVICE_NAME} CACHE INTERNAL "")
========================== changed file ==========================
@@ -38,4 +38,4 @@ REGISTER_CONVERTER(SOFTMAX, ConvertSoftmax)
 REGISTER_CONVERTER(SUB, ConvertElementwise)
 REGISTER_CONVERTER(TANH, ConvertUnaryActivations)
 
-#endif
+#endif  // LITE_BACKENDS_NNADAPTER_NNADAPTER_SRC_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H_
========================== changed file ==========================
@@ -20,7 +20,8 @@
 namespace nnadapter {
 namespace intel_openvino {
 
-int ConvertBatchNormalization(Converter* converter, core::Operation* operation) {
+int ConvertBatchNormalization(Converter* converter,
+                              core::Operation* operation) {
   BATCH_NORMALIZATION_OPERATION_EXTRACT_INPUTS_OUTPUTS
 
   // Convert operand to Intel OpenVINO's OutputNode
@@ -33,8 +34,13 @@ int ConvertBatchNormalization(Converter* converter, core::Operation* operation)
   auto mean_node = converter->ConvertToOutputNode(mean_operand);
   auto variance_node = converter->ConvertToOutputNode(variance_operand);
   // Create <BatchNormInference> Node for Intel OpenVINO
-  std::shared_ptr<Node> node = std::make_shared<default_opset::BatchNormInference>
-      (*input_node, *gamma_node, *beta_node, *mean_node, *variance_node, epsilon);
+  std::shared_ptr<Node> node =
+      std::make_shared<default_opset::BatchNormInference>(*input_node,
+                                                          *gamma_node,
+                                                          *beta_node,
+                                                          *mean_node,
+                                                          *variance_node,
+                                                          epsilon);
   auto output_node = std::make_shared<OutputNode>(node->output(0));
   converter->UpdateOutputNodeMap(output_operand, output_node);
   return NNADAPTER_NO_ERROR;
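Note: BatchNormInference computes the inference-form normalization y = gamma * (x - mean) / sqrt(variance + epsilon) + beta per channel. A self-contained sketch of building the equivalent node directly against ov::opset8 (shapes and constant values are invented for illustration; the driver's default_opset alias is assumed to resolve to a compatible opset):

    #include <openvino/openvino.hpp>
    #include <openvino/opsets/opset8.hpp>

    std::shared_ptr<ov::Model> BuildBatchNormExample() {
      using namespace ov::opset8;
      // NCHW input with 3 channels; gamma/beta/mean/variance are per-channel.
      auto x = std::make_shared<Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
      auto gamma = Constant::create(ov::element::f32, ov::Shape{3}, {1.0f, 1.0f, 1.0f});
      auto beta = Constant::create(ov::element::f32, ov::Shape{3}, {0.0f, 0.0f, 0.0f});
      auto mean = Constant::create(ov::element::f32, ov::Shape{3}, {0.0f, 0.0f, 0.0f});
      auto variance = Constant::create(ov::element::f32, ov::Shape{3}, {1.0f, 1.0f, 1.0f});
      // y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
      auto bn = std::make_shared<BatchNormInference>(x, gamma, beta, mean, variance, 1e-5);
      return std::make_shared<ov::Model>(bn->outputs(), ov::ParameterVector{x});
    }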
========================== changed file ==========================
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
                                  &pad_width_left,
                                  &pad_width_right,
                                  stride_width,
-                                 &dilation_width);
+                                 &dilation_width);
   }
 
   // Convert operand to Intel OpenVINO's OutputNode
@@ -48,21 +48,35 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
   }
   auto filter_node = converter->ConvertToOutputNode(filter_operand);
   auto ov_auto_pad = ConvertToOVPadType(auto_pad);
-  auto ov_strides = ov::Strides({static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
-  auto ov_diliations = ov::Strides({static_cast<size_t>(dilation_height), static_cast<size_t>(dilation_width)});
-  auto ov_pads_begin = ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_top), static_cast<std::ptrdiff_t>(pad_width_left)});
-  auto ov_pads_end = ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_bottom), static_cast<std::ptrdiff_t>(pad_width_right)});
+  auto ov_strides = ov::Strides(
+      {static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
+  auto ov_diliations = ov::Strides({static_cast<size_t>(dilation_height),
+                                    static_cast<size_t>(dilation_width)});
+  auto ov_pads_begin =
+      ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_top),
+                          static_cast<std::ptrdiff_t>(pad_width_left)});
+  auto ov_pads_end =
+      ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_bottom),
+                          static_cast<std::ptrdiff_t>(pad_width_right)});
   // Create <Convolution> Node for Intel OpenVINO
   std::shared_ptr<OutputNode> output_node{nullptr};
-  std::shared_ptr<Node> node = std::make_shared<default_opset::Convolution>(*input_node, *filter_node, ov_strides,
-      ov_pads_begin, ov_pads_end, ov_diliations, ov_auto_pad);
+  std::shared_ptr<Node> node =
+      std::make_shared<default_opset::Convolution>(*input_node,
+                                                   *filter_node,
+                                                   ov_strides,
+                                                   ov_pads_begin,
+                                                   ov_pads_end,
+                                                   ov_diliations,
+                                                   ov_auto_pad);
   auto conv_output_node = std::make_shared<OutputNode>(node->output(0));
   converter->UpdateOutputNodeMap(output_operand, conv_output_node);
   output_node = conv_output_node;
   NNADAPTER_LOG(INFO) << "Convert conv2d success";
   // Bias
-  auto unsqueeze_node = converter->AddUnsqueezeOutputNode(bias_operand, std::vector<size_t>({3}), std::vector<int64_t>({0,2,3}));
-  std::shared_ptr<Node> add_node = std::make_shared<default_opset::Add>(*conv_output_node, *unsqueeze_node);
+  auto unsqueeze_node = converter->AddUnsqueezeOutputNode(
+      bias_operand, std::vector<size_t>({3}), std::vector<int64_t>({0, 2, 3}));
+  std::shared_ptr<Node> add_node =
+      std::make_shared<default_opset::Add>(*conv_output_node, *unsqueeze_node);
   auto add_output_node = std::make_shared<OutputNode>(add_node->output(0));
   converter->UpdateOutputNodeMap(output_operand, add_output_node);
   output_node = add_output_node;
@@ -71,9 +85,10 @@ int ConvertConv2D(Converter* converter, core::Operation* operation) {
   switch (fuse_code) {
 #define CONVERT_UNARY_ACTIVATION(type, class_name)                            \
   case NNADAPTER_FUSED_##type: {                                              \
-    std::shared_ptr<Node> act_node = std::make_shared<default_opset::class_name>(*output_node); \
+    std::shared_ptr<Node> act_node =                                          \
+        std::make_shared<default_opset::class_name>(*output_node);            \
     auto act_output_node = std::make_shared<OutputNode>(act_node->output(0)); \
-    converter->UpdateOutputNodeMap(output_operand, act_output_node); \
+    converter->UpdateOutputNodeMap(output_operand, act_output_node);          \
   } break;
     CONVERT_UNARY_ACTIVATION(RELU, Relu);
     NNADAPTER_LOG(INFO) << " Convert conv2d-relu success!";
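Note on the bias handling above: the NNAdapter bias operand is a 1-D tensor of shape [C_out], and AddUnsqueezeOutputNode(bias_operand, {3}, {0, 2, 3}) unsqueezes it to [1, C_out, 1, 1] so that the following Add broadcasts it across the NCHW convolution output. A standalone sketch of the same graph fragment with ov::opset8 (the helper name AddBiasNCHW is invented for illustration):

    #include <openvino/openvino.hpp>
    #include <openvino/opsets/opset8.hpp>

    // Unsqueeze a [C] bias at axes {0, 2, 3} to [1, C, 1, 1], then add it to the
    // NCHW convolution output using NumPy-style broadcasting.
    ov::Output<ov::Node> AddBiasNCHW(const ov::Output<ov::Node>& conv_out,
                                     const ov::Output<ov::Node>& bias) {
      using namespace ov::opset8;
      auto axes = Constant::create(ov::element::i64, ov::Shape{3}, {0, 2, 3});
      auto unsqueezed = std::make_shared<Unsqueeze>(bias, axes);
      return std::make_shared<Add>(conv_out, unsqueezed)->output(0);
    }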
========================== changed file ==========================
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,6 +15,7 @@
 #include "driver/intel_openvino/converter/converter.h"
 #include <unistd.h>
 #include <algorithm>
+#include <utility>
 #include <vector>
 #include "utility/debug.h"
 #include "utility/logging.h"
@@ -69,8 +70,8 @@ std::shared_ptr<OutputNode> Converter::UpdateOutputNodeMap(
     core::Operand* operand, std::shared_ptr<OutputNode> output_node) {
   auto it = output_nodes_->find(operand);
   if (it == output_nodes_->end()) {
-    auto result = output_nodes_->insert(std::make_pair(
-        operand, std::vector<std::shared_ptr<OutputNode>>()));
+    auto result = output_nodes_->insert(
+        std::make_pair(operand, std::vector<std::shared_ptr<OutputNode>>()));
     NNADAPTER_CHECK(result.second);
     it = result.first;
   }
@@ -87,16 +88,21 @@ std::shared_ptr<OutputNode> Converter::ConvertToOutputNode(
     }
   }
   if (IsConstantOperand(operand)) {
-    auto constant_node = std::make_shared<default_opset::Constant>(ConvertToOVElementType(operand->type.precision),
-        ConvertToOVShape(dimensions), operand->buffer);
-    std::shared_ptr<OutputNode> output_node = std::make_shared<OutputNode>(constant_node->output(0));
+    auto constant_node = std::make_shared<default_opset::Constant>(
+        ConvertToOVElementType(operand->type.precision),
+        ConvertToOVShape(dimensions),
+        operand->buffer);
+    std::shared_ptr<OutputNode> output_node =
+        std::make_shared<OutputNode>(constant_node->output(0));
     UpdateOutputNodeMap(operand, output_node);
     return output_node;
   } else if (IsModelInputOperand(operand)) {
-    auto parameter_node = std::make_shared<default_opset::Parameter>(ConvertToOVElementType(operand->type.precision),
-        ConvertToOVShape(dimensions));
+    auto parameter_node = std::make_shared<default_opset::Parameter>(
+        ConvertToOVElementType(operand->type.precision),
+        ConvertToOVShape(dimensions));
     parameter_nodes_->push_back(parameter_node);
-    std::shared_ptr<OutputNode> output_node = std::make_shared<OutputNode>(parameter_node->output(0));
+    std::shared_ptr<OutputNode> output_node =
+        std::make_shared<OutputNode>(parameter_node->output(0));
     UpdateOutputNodeMap(operand, output_node);
     return output_node;
   }
========================== changed file ==========================
@@ -27,32 +27,40 @@ class Converter {
  public:
   explicit Converter(
       std::vector<std::shared_ptr<default_opset::Parameter>>* paramter_nodes,
-      std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>* output_nodes) : parameter_nodes_(paramter_nodes), output_nodes_(output_nodes) {}
-
+      std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>*
+          output_nodes)
+      : parameter_nodes_(paramter_nodes), output_nodes_(output_nodes) {}
+
   ~Converter() {}
 
   // Convert a NNAdapter model to an intel openvino graph
   int Apply(core::Model* model);
 
   // Convert a NNAdapter operand to an intel openvino OutputNode
-  std::shared_ptr<OutputNode> ConvertToOutputNode(core::Operand* operand, std::vector<int32_t> dimensions = {});
+  std::shared_ptr<OutputNode> ConvertToOutputNode(
+      core::Operand* operand, std::vector<int32_t> dimensions = {});
 
-  std::shared_ptr<OutputNode> UpdateOutputNodeMap(core::Operand* operand, std::shared_ptr<OutputNode> output_node);
+  std::shared_ptr<OutputNode> UpdateOutputNodeMap(
+      core::Operand* operand, std::shared_ptr<OutputNode> output_node);
 
   std::shared_ptr<OutputNode> GetMappedOutputNode(core::Operand* operand);
 
-  template<typename T>
-  std::shared_ptr<OutputNode> AddUnsqueezeOutputNode(core::Operand* operand,
-      std::vector<size_t> dimensions, std::vector<T> axes) {
-    auto axes_node = AddConstOutputNode<T>(dimensions, axes);
-    auto y_node = ConvertToOutputNode(operand);
-    auto unsqueeze_node = std::make_shared<default_opset::Unsqueeze>(*y_node, *axes_node);
-    return std::make_shared<OutputNode>(unsqueeze_node->output(0));
+  template <typename T>
+  std::shared_ptr<OutputNode> AddUnsqueezeOutputNode(
+      core::Operand* operand,
+      std::vector<size_t> dimensions,
+      std::vector<T> axes) {
+    auto axes_node = AddConstOutputNode<T>(dimensions, axes);
+    auto y_node = ConvertToOutputNode(operand);
+    auto unsqueeze_node =
+        std::make_shared<default_opset::Unsqueeze>(*y_node, *axes_node);
+    return std::make_shared<OutputNode>(unsqueeze_node->output(0));
   }
 
  private:
   std::vector<std::shared_ptr<default_opset::Parameter>>* parameter_nodes_;
-  std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>* output_nodes_;
+  std::map<core::Operand*, std::vector<std::shared_ptr<OutputNode>>>*
+      output_nodes_;
 };
 
 }  // namespace intel_openvino
========================== changed file ==========================
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ namespace intel_openvino {
 
 int ConvertElementwise(Converter* converter, core::Operation* operation) {
   ELEMENTWISE_OPERATION_EXTRACT_INPUTS_OUTPUTS
-
+
   // Convert operand to Intel OpenVINO's OutputNode
   auto input0_node = converter->GetMappedOutputNode(input0_operand);
   if (!input0_node) {
@@ -35,11 +35,12 @@ int ConvertElementwise(Converter* converter, core::Operation* operation) {
   // Create <ElementWise> Node for Intel OpenVINO
   std::shared_ptr<OutputNode> output_node{nullptr};
   switch (operation->type) {
-#define CONVERT_ELEMENTWISE(type, class_name) \
-  case NNADAPTER_##type: { \
-    std::shared_ptr<Node> node = std::make_shared<default_opset::class_name>(*input0_node, *input1_node); \
-    output_node = std::make_shared<OutputNode>(node->output(0)); \
-    converter->UpdateOutputNodeMap(output_operand, output_node); \
+#define CONVERT_ELEMENTWISE(type, class_name)                                 \
+  case NNADAPTER_##type: {                                                    \
+    std::shared_ptr<Node> node = std::make_shared<default_opset::class_name>( \
+        *input0_node, *input1_node);                                          \
+    output_node = std::make_shared<OutputNode>(node->output(0));              \
+    converter->UpdateOutputNodeMap(output_operand, output_node);              \
   } break;
     CONVERT_ELEMENTWISE(ADD, Add);
     CONVERT_ELEMENTWISE(SUB, Subtract);
@@ -61,9 +62,10 @@ int ConvertElementwise(Converter* converter, core::Operation* operation) {
   switch (fuse_code) {
 #define CONVERT_UNARY_ACTIVATION(type, class_name)                            \
   case NNADAPTER_FUSED_##type: {                                              \
-    std::shared_ptr<Node> act_node = std::make_shared<default_opset::class_name>(*output_node); \
+    std::shared_ptr<Node> act_node =                                          \
+        std::make_shared<default_opset::class_name>(*output_node);            \
     auto act_output_node = std::make_shared<OutputNode>(act_node->output(0)); \
-    converter->UpdateOutputNodeMap(output_operand, act_output_node); \
+    converter->UpdateOutputNodeMap(output_operand, act_output_node);          \
   } break;
     CONVERT_UNARY_ACTIVATION(RELU, Relu);
 #undef CONVERT_UNARY_ACTIVATION
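Note: the CONVERT_ELEMENTWISE and CONVERT_UNARY_ACTIVATION X-macros above generate one switch case per NNAdapter operation or fused activation. For example, CONVERT_ELEMENTWISE(ADD, Add) expands to roughly the following case (a sketch of the preprocessor output, not code from the repository):

    case NNADAPTER_ADD: {
      std::shared_ptr<Node> node = std::make_shared<default_opset::Add>(
          *input0_node, *input1_node);
      output_node = std::make_shared<OutputNode>(node->output(0));
      converter->UpdateOutputNodeMap(output_operand, output_node);
    } break;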
========================== changed file ==========================
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "operation/mat_mul.h"
 #include "driver/intel_openvino/converter/converter.h"
+#include "operation/mat_mul.h"
 #include "utility/debug.h"
 #include "utility/logging.h"
 
@@ -33,8 +33,8 @@ int ConvertMatMul(Converter* converter, core::Operation* operation) {
     y_node = converter->ConvertToOutputNode(y_operand);
   }
   // Create <MatMul> Node for Intel OpenVINO
-  std::shared_ptr<Node> node = std::make_shared<default_opset::MatMul>
-      (*x_node, *y_node, transpose_x, transpose_y);
+  std::shared_ptr<Node> node = std::make_shared<default_opset::MatMul>(
+      *x_node, *y_node, transpose_x, transpose_y);
   auto output_node = std::make_shared<OutputNode>(node->output(0));
   converter->UpdateOutputNodeMap(output_operand, output_node);
   return NNADAPTER_NO_ERROR;
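Note: transpose_x and transpose_y map directly onto the transpose flags of OpenVINO's MatMul, which transpose the two innermost dimensions of the corresponding input before the product. A minimal sketch against ov::opset8 (the wrapper function is invented for illustration):

    #include <openvino/opsets/opset8.hpp>

    // Builds C = op(x) * op(y), where op() transposes the last two dimensions
    // of its argument when the corresponding flag is set.
    std::shared_ptr<ov::Node> MakeMatMul(const ov::Output<ov::Node>& x,
                                         const ov::Output<ov::Node>& y,
                                         bool transpose_x,
                                         bool transpose_y) {
      return std::make_shared<ov::opset8::MatMul>(x, y, transpose_x, transpose_y);
    }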