-
Notifications
You must be signed in to change notification settings - Fork 1.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[NPU][RKNPU] In SSD network, calculate boxcoder boxes & variance at opt-time #5788
[NPU][RKNPU] In SSD network, calculate boxcoder boxes & variance at opt-time #5788
Conversation
Thanks for your contribution! |
} | ||
return strides; | ||
} | ||
void ConcatEliminator::ComputeConcat(const std::vector<lite::Tensor*> inputs, |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
加下空行
namespace paddle { | ||
namespace lite { | ||
namespace mir { | ||
std::vector<size_t> ConcatEliminator::StrideNumel(const DDim& ddim) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
加下空行
} | ||
} | ||
void ConcatEliminator::BuildPattern() { | ||
auto* reshape2_output_0 = VarNode("reshape2_output_0") |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
concat只能匹配6个输入的情况吗?
auto out_stride = StrideNumel(output->dims()); | ||
void* dst = output->mutable_data<float>() + output_offset; | ||
const void* src = in->data<float>(); | ||
// src and dst tensor should have the same dims size. |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
注释第一个字母大写
auto* concat_output_0 = | ||
VarNode("concat_output_0")->assert_is_op_output("concat", "Out"); | ||
|
||
// concat topology |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
删除无用的注释
auto reshape_instruct = matched.at("reshape2")->stmt(); | ||
auto op_desc = reshape_instruct->mutable_op_info(); | ||
auto* scope = reshape_instruct->op()->scope(); | ||
// get reshape's input tensor |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
这些注释需要吗?
auto output_t = output_var->GetMutable<lite::Tensor>(); | ||
// get reshape's other attr | ||
|
||
// calcu reshape |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Calc reshape offline?
@@ -67,7 +67,7 @@ class FuseBase { | |||
void PerformPatternMatcher(SSAGraph* graph); | |||
|
|||
// Delete nodes that are marked as Intermediate | |||
void DeleteInterNodes(SSAGraph* graph); | |||
virtual void DeleteInterNodes(SSAGraph* graph); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
为什么需要改这个?
lite/core/optimizer.h
Outdated
@@ -112,6 +112,9 @@ class Optimizer { | |||
"lite_scale_activation_fuse_pass", // | |||
"lite_elementwise_scale_fuse_pass", // | |||
"lite_instance_norm_activation_fuse_pass", // | |||
"lite_priorbox_eliminate_pass", // |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
不要加lite_前缀
lite/core/program.cc
Outdated
@@ -76,6 +76,12 @@ void RuntimeProgram::SaveToProgram( | |||
if (it != origin_var_maps.end()) { | |||
v->SetType(it->second.GetType()); | |||
v->SetPersistable(it->second.Persistable()); | |||
if (it->second.GetType() == cpp::VarDesc::Type::LOD_TENSOR) { | |||
auto tensor = scope->FindVar(var_name)->GetMutable<Tensor>(); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
是否需要判断下var是否存在?
auto var = scope->FindVar(var_name);
if (var != nullptr) {
auto tensor = var->GetMutable<Tensor>();
if (tensor != nullptr && (tensor->persistable() ^ v->Persistable())) {
v->SetPersistable(tensor->persistable());
}
}
…by review test=develop
…by review test=develop
lite/api/paddle_use_passes.h
Outdated
@@ -76,6 +76,7 @@ USE_MIR_PASS(control_flow_op_unused_inputs_and_outputs_eliminate_pass); | |||
USE_MIR_PASS(control_flow_op_shared_inputs_and_outputs_place_sync_pass); | |||
USE_MIR_PASS(lite_scale_activation_fuse_pass); | |||
USE_MIR_PASS(lite_instance_norm_activation_fuse_pass); | |||
USE_MIR_PASS(offline_calc_ssd_boxes_pass); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
ssd_boxes_calc_offline_pass
lite/core/program.cc
Outdated
auto var = scope->FindVar(var_name); | ||
if (var != nullptr) { | ||
auto tensor = var->GetMutable<Tensor>(); | ||
if (tensor != nullptr && tensor->persistable() && |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
建议用异或
…by review test=develop
…by review test=develop
…by review test=develop
…by review test=develop
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
test=develop
offline-calculate boxcoder's boxes & variances, remove related priorbox & reshape or flatten & concat