[QualcommQnn] support models (#9412)
zhupengyang authored Sep 8, 2022
1 parent 322f350 commit b5e5795
Showing 27 changed files with 427 additions and 53 deletions.
lite/backends/nnadapter/nnadapter/src/operation/math/expand.h
@@ -0,0 +1,51 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <vector>
#include "operation/math/utility.h"

namespace nnadapter {
namespace operation {
namespace math {

template <typename T>
static int expand(const T* input_data,
                  const std::vector<int32_t>& input_shape,
                  T* output_data,
                  const std::vector<int32_t>& output_shape) {
  // Row-major strides for both shapes. input_shape must already have the
  // same rank as output_shape, with 1s on the dimensions to broadcast.
  std::vector<int> in_stride(input_shape.size(), 1);
  std::vector<int> out_stride(output_shape.size(), 1);
  for (int i = input_shape.size() - 2; i >= 0; --i) {
    in_stride[i] = input_shape[i + 1] * in_stride[i + 1];
  }
  for (int i = output_shape.size() - 2; i >= 0; --i) {
    out_stride[i] = output_shape[i + 1] * out_stride[i + 1];
  }
  int out_size = shape_production(output_shape);
  for (int out_id = 0; out_id < out_size; ++out_id) {
    // Map each output coordinate back to an input element; on broadcast
    // dimensions input_shape[i] == 1, so `% input_shape[i]` pins it to 0.
    int in_id = 0;
    for (int i = input_shape.size() - 1; i >= 0; --i) {
      int in_j = (out_id / out_stride[i]) % input_shape[i];
      in_id += in_j * in_stride[i];
    }
    output_data[out_id] = input_data[in_id];
  }
  return 0;
}

} // namespace math
} // namespace operation
} // namespace nnadapter
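
For reference, a minimal usage sketch of this helper (assumes the header is on the include path, and that the input shape has been pre-padded to the output rank with 1s on broadcast dimensions):

// Illustrative sketch, not part of this commit.
#include <cstdio>
#include <vector>
#include "operation/math/expand.h"

int main() {
  // Broadcast a 1x3 row into a 2x3 output; the size-1 leading dim maps
  // every output row back to input row 0 via the `% input_shape[i]` step.
  const float input[3] = {1.0f, 2.0f, 3.0f};
  float output[6] = {0.0f};
  nnadapter::operation::math::expand(input, {1, 3}, output, {2, 3});
  for (float v : output) printf("%g ", v);  // prints: 1 2 3 1 2 3
  return 0;
}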
@@ -39,6 +39,7 @@ class NCHW2NHWCDataLayoutConverter {
  void ConvertAdaptivePool2D(core::Operation* operation);
  void ConvertBatchNormalization(core::Operation* operation);
  void ConvertCast(core::Operation* operation);
+  void ConvertChannelShuffle(core::Operation* operation);
  void ConvertClip(core::Operation* operation);
  void ConvertComparisons(core::Operation* operation);
  void ConvertCumSum(core::Operation* operation);
@@ -55,6 +56,7 @@
  void ConvertLeakyRelu(core::Operation* operation);
  void ConvertLpNormalization(core::Operation* operation);
  void ConvertActivation(core::Operation* operation);
+  void ConvertPad(core::Operation* operation);
  void ConvertPow(core::Operation* operation);
  void ConvertQuantize(core::Operation* operation);
  void ConvertReduce(core::Operation* operation);
@@ -0,0 +1,23 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "core/types.h"

namespace nnadapter {

void FuseUnsqueezePadSqueezeIntoPad(core::Model *model);

} // namespace nnadapter
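
Judging by its name, this new pass matches an UNSQUEEZE → PAD → SQUEEZE chain and folds it into a single PAD on the original rank — a chain that typically appears when a pad operator restricted to 4-D inputs is applied to a lower-rank tensor.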
@@ -98,6 +98,8 @@ class PatternMatcher {
                                    int index = -1);
  Pattern* IsOperationOutputOperand(NNAdapterOperationType type,
                                    int index = -1);
+  Pattern* HasInLinksNum(int num);
+  Pattern* HasOutLinksNum(int num);
  // Mark the pattern matched node to be deleted, so its inlinks and outlinks
  // should be inside a matched subgraph.
  Pattern* IsIntermediate();
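
The two new predicates constrain the in/out degree of a matched node. A hypothetical fragment in the style of the BuildPattern code shown near the end of this diff (the pattern name and fusion context are illustrative, not from this commit):

// Illustrative sketch: require that a matched RESHAPE output feeds exactly
// one consumer, so deleting the matched subgraph cannot orphan another
// operation's input.
auto* reshape_out = CreatePattern("reshape_out")
                        ->IsOperationOutputOperand(NNADAPTER_RESHAPE, 0)
                        ->HasOutLinksNum(1);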
@@ -26,8 +26,8 @@ REGISTER_CONVERTER(ASSIGN, ConvertAssign)
REGISTER_CONVERTER(AVERAGE_POOL_2D, ConvertPool2D)
REGISTER_CONVERTER(BATCH_NORMALIZATION, ConvertBatchNormalization)
REGISTER_CONVERTER(CAST, ConvertCast)
-REGISTER_CONVERTER(CLIP, ConvertClip)
REGISTER_CONVERTER(CHANNEL_SHUFFLE, ConvertChannelShuffle)
+REGISTER_CONVERTER(CLIP, ConvertClip)
REGISTER_CONVERTER(CONCAT, ConvertConcat)
REGISTER_CONVERTER(CONV_2D, ConvertConv2D)
REGISTER_CONVERTER(CONV_2D_TRANSPOSE, ConvertConv2DTranspose)
48 changes: 46 additions & 2 deletions lite/backends/nnadapter/nnadapter/src/operation/expand.cc
@@ -15,6 +15,7 @@
#include "operation/expand.h"
#include <vector>
#include "core/types.h"
#include "operation/math/expand.h"
#include "utility/debug.h"
#include "utility/hints.h"
#include "utility/logging.h"
@@ -70,7 +71,7 @@ NNADAPTER_EXPORT void UpdateExpandInferOutputShape(
}

NNADAPTER_EXPORT bool ValidateExpand(const core::Operation* operation) {
-  return false;
+  return true;
}

NNADAPTER_EXPORT int PrepareExpand(core::Operation* operation) {
@@ -120,7 +121,50 @@ NNADAPTER_EXPORT int PrepareExpand(core::Operation* operation) {
}

NNADAPTER_EXPORT int ExecuteExpand(core::Operation* operation) {
-  return NNADAPTER_FEATURE_NOT_SUPPORTED;
+  EXPAND_OPERATION_EXTRACT_INPUTS_OUTPUTS
+
+  auto in_dims_data = input_operand->type.dimensions.data;
+  auto in_dims_count = input_operand->type.dimensions.count;
+  std::vector<int32_t> in_dims(in_dims_data, in_dims_data + in_dims_count);
+  auto out_dims_data = output_operand->type.dimensions.data;
+  auto out_dims_count = output_operand->type.dimensions.count;
+  std::vector<int32_t> out_dims(out_dims_data, out_dims_data + out_dims_count);
+  auto in_dtype_length =
+      GetOperandPrecisionDataLength(input_operand->type.precision);
+  auto output_buffer = AllocateOperand(output_operand);
+  int status = -1;
+  // Dispatch on the element byte width: a same-width integer type moves
+  // the bits of any precision unchanged.
+  switch (in_dtype_length) {
+    case 1:
+      status = math::expand(reinterpret_cast<int8_t*>(input_operand->buffer),
+                            in_dims,
+                            static_cast<int8_t*>(output_buffer),
+                            out_dims);
+      break;
+    case 2:
+      status = math::expand(reinterpret_cast<int16_t*>(input_operand->buffer),
+                            in_dims,
+                            static_cast<int16_t*>(output_buffer),
+                            out_dims);
+      break;
+    case 4:
+      status = math::expand(reinterpret_cast<int32_t*>(input_operand->buffer),
+                            in_dims,
+                            static_cast<int32_t*>(output_buffer),
+                            out_dims);
+      break;
+    case 8:
+      status = math::expand(reinterpret_cast<int64_t*>(input_operand->buffer),
+                            in_dims,
+                            static_cast<int64_t*>(output_buffer),
+                            out_dims);
+      break;
+    default:
+      NNADAPTER_LOG(FATAL) << "Unsupported data type length: "
+                           << in_dtype_length;
+      break;
+  }
+  NNADAPTER_CHECK_EQ(status, 0);
+  return NNADAPTER_NO_ERROR;
}

} // namespace operation
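
ExecuteExpand dispatches on the operand's precision byte width rather than its exact type: copying elements through a same-width integer is bit-exact, so (for example) float32 operands would take the int32_t branch. A standalone sketch of that idea:

// Illustrative sketch, not part of this commit.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Moving a float through a same-width integer preserves its bit
  // pattern exactly; no value conversion happens.
  float src = 3.14f;
  int32_t bits;
  std::memcpy(&bits, &src, sizeof(bits));
  float dst;
  std::memcpy(&dst, &bits, sizeof(dst));
  std::printf("%f\n", dst);  // 3.140000
  return 0;
}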
@@ -249,6 +249,22 @@ void NCHW2NHWCDataLayoutConverter::ConvertCast(core::Operation* operation) {
  SetPermutation(output_operand, input_permutation);
}

+void NCHW2NHWCDataLayoutConverter::ConvertChannelShuffle(
+    core::Operation* operation) {
+  auto& input_operands = operation->input_operands;
+  auto& output_operands = operation->output_operands;
+  auto input_count = input_operands.size();
+  auto output_count = output_operands.size();
+  NNADAPTER_CHECK_EQ(input_count, 2);
+  NNADAPTER_CHECK_EQ(output_count, 1);
+  auto input_operand = input_operands[0];
+  auto output_operand = output_operands[0];
+  // The input and output operands share the same dimorder vector
+  auto input_permutation = GetPermutation(input_operand);
+  TransposeOperand(output_operand, input_permutation);
+  SetPermutation(output_operand, input_permutation);
+}

void NCHW2NHWCDataLayoutConverter::ConvertClip(core::Operation* operation) {
  auto& input_operands = operation->input_operands;
  auto& output_operands = operation->output_operands;
@@ -632,6 +648,31 @@ void NCHW2NHWCDataLayoutConverter::ConvertLpNormalization(
  SetPermutation(output_operand, input_permutation);
}

+void NCHW2NHWCDataLayoutConverter::ConvertPad(core::Operation* operation) {
+  auto& input_operands = operation->input_operands;
+  auto& output_operands = operation->output_operands;
+  auto input_count = input_operands.size();
+  auto output_count = output_operands.size();
+  NNADAPTER_CHECK_EQ(input_count, 4);
+  NNADAPTER_CHECK_EQ(output_count, 1);
+  auto input_operand = input_operands[0];
+  auto output_operand = output_operands[0];
+  // The input and output operands share the same dimorder vector
+  auto input_permutation = GetPermutation(input_operand);
+  TransposeOperand(output_operand, input_permutation);
+  SetPermutation(output_operand, input_permutation);
+  // Reorder the (before, after) pad pairs to match the permuted layout
+  auto pads_data = reinterpret_cast<int32_t*>(input_operands[1]->buffer);
+  int32_t pads_size = input_operands[1]->length / sizeof(int32_t);
+  NNADAPTER_CHECK_EQ(input_permutation.size() * 2, pads_size);
+  std::vector<int32_t> trans_pads_data(pads_size);
+  for (size_t i = 0; i < input_permutation.size(); i++) {
+    trans_pads_data[i * 2] = pads_data[input_permutation[i] * 2];
+    trans_pads_data[i * 2 + 1] = pads_data[input_permutation[i] * 2 + 1];
+  }
+  memcpy(pads_data, trans_pads_data.data(), pads_size * sizeof(int32_t));
+}

void NCHW2NHWCDataLayoutConverter::ConvertPow(core::Operation* operation) {
  auto& input_operands = operation->input_operands;
  auto& output_operands = operation->output_operands;
@@ -1218,6 +1259,9 @@ void NCHW2NHWCDataLayoutConverter::Apply(core::Model* model) {
      case NNADAPTER_CAST:
        ConvertCast(operation);
        break;
+      case NNADAPTER_CHANNEL_SHUFFLE:
+        ConvertChannelShuffle(operation);
+        break;
      case NNADAPTER_CLIP:
        ConvertClip(operation);
        break;
@@ -1271,6 +1315,9 @@
      case NNADAPTER_MAT_MUL:
        ConvertMatMul(operation);
        break;
+      case NNADAPTER_PAD:
+        ConvertPad(operation);
+        break;
      case NNADAPTER_POW:
        ConvertPow(operation);
        break;
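
To make the pads reordering in ConvertPad concrete: output dimension i takes the (before, after) pad pair of source dimension input_permutation[i]. A standalone sketch for the common NCHW→NHWC permutation {0, 2, 3, 1}:

// Illustrative sketch, not part of this commit.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // NCHW pads: {n0, n1, c0, c1, h0, h1, w0, w1}
  std::vector<int32_t> pads = {0, 0, 1, 1, 2, 2, 3, 3};
  const std::vector<int32_t> permutation = {0, 2, 3, 1};  // NCHW -> NHWC
  std::vector<int32_t> trans(pads.size());
  for (size_t i = 0; i < permutation.size(); i++) {
    trans[i * 2] = pads[permutation[i] * 2];
    trans[i * 2 + 1] = pads[permutation[i] * 2 + 1];
  }
  for (int32_t v : trans) std::printf("%d ", v);  // 0 0 2 2 3 3 1 1
  return 0;
}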
@@ -115,6 +115,7 @@ NNADAPTER_EXPORT void ConvertQuantizationSymmToAsymm(core::Model* model) {
    case NNADAPTER_LAYER_NORMALIZATION:
    case NNADAPTER_LEAKY_RELU:
    case NNADAPTER_MAX_POOL_2D:
+    case NNADAPTER_PAD:
    case NNADAPTER_RELU:
    case NNADAPTER_RELU6:
    case NNADAPTER_RESHAPE:
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "optimizer/fuse_reshape_transpose_reshape_into_channel_shuffle.h"
#include <algorithm>
#include <map>
#include <vector>
#include "optimizer/fuse_conv2d_activation_into_conv2d.h"
#include "optimizer/pattern_matcher.h"
#include "utility/debug.h"
#include "utility/logging.h"
@@ -91,8 +91,7 @@ void ReshapeTransposeReshapeFuser::BuildPattern() {
          ->IsIntermediate();
  auto last_reshape_output_pattern =
      CreatePattern("last_reshape_output")
-          ->IsOperationOutputOperand(NNADAPTER_RESHAPE, 0)
-          ->IsIntermediate();
+          ->IsOperationOutputOperand(NNADAPTER_RESHAPE, 0);
  // Create the topological connections for the above patterns
  std::vector<Pattern*> first_reshape_input_patterns{
      first_reshape_input_pattern, first_reshape_shape_pattern};
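
Note the behavioral change in this last hunk: last_reshape_output is no longer marked IsIntermediate(), so the final reshape's output operand survives the fusion — presumably because it becomes the output operand of the fused CHANNEL_SHUFFLE operation rather than an internal node to be deleted with the matched subgraph.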