-
Notifications
You must be signed in to change notification settings - Fork 0
/
reduction.cpp
87 lines (72 loc) · 1.81 KB
/
reduction.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
#include "reduction.h"
#include "layer_register.h"
#include "utils.h"
#include "omp.h"
// Construct an unconfigured Reduction layer; load_model() later fills
// in _reduce_type and _dim from the model description.
Reduction::Reduction()
    : _reduce_type(-1) // -1 = no reduce operation selected yet
    , _dim(0)          // BUG FIX: _dim was previously left uninitialized,
                       // but forward() reads it unconditionally
{
}
// Destructor: nothing to release here — this layer allocates no
// resources of its own (output tensors created in forward() are handed
// to the caller via output[0]).
Reduction::~Reduction()
{
}
int Reduction::load_model(const vector<string> ¶ms, FILE* fp)
{
vector<string> reduce_param = split(params[6], "=");
if(reduce_param[1] == "Mean")
{
_reduce_type = 0;
}
vector<string> dim_param = split(params[7], "=");
_dim = atoi(dim_param[1].c_str());
return 0;
}
// Applies the configured reduction to input[0], writing the result to
// output[0] (allocating a fresh Tensor when that slot is nullptr).
// Only reduce type 0 (Mean) is implemented: dimensions [_dim, rank)
// are averaged away, leaving an output of shape inputShape[0.._dim).
void Reduction::forward(vector<Tensor*> &input, vector<Tensor*> &output)
{
    const bool ownsResult = (output[0] == nullptr);
    Tensor* result = ownsResult ? new Tensor() : output[0];
    if(_reduce_type != 0)
    {
        // Unsupported reduce type: preserve the original behavior of
        // leaving output[0] untouched, but BUG FIX — do not leak the
        // tensor we may have just allocated.
        if(ownsResult)
        {
            delete result;
        }
        return;
    }
    vector<float>* inputData = input[0]->get_data();
    vector<float>* outputData = result->get_data();
    //TODO: handle more general / higher-rank dimension cases
    vector<int> inputShape = input[0]->get_shape();
    vector<int> outputShape;
    int blocks = 1;   // number of output elements = product of kept dims [0, _dim)
    for(int i=0; i<_dim; i++)
    {
        outputShape.push_back(inputShape[i]);
        blocks *= inputShape[i];
    }
    int stride = 1;   // elements averaged per output value = product of reduced dims
    for(int i=_dim; i<(int)inputShape.size(); i++)
    {
        stride *= inputShape[i];
    }
    result->set_shape(outputShape);
    // BUG FIX: the output buffer was written through data()[i] without
    // ever being sized — an out-of-bounds write whenever output[0]
    // arrives as a freshly constructed (empty) Tensor.
    outputData->resize(blocks);
    omp_set_max_active_levels(2);
    //#pragma omp parallel for
    for(int i=0; i<blocks; i++)
    {
        float sum = 0.0f;
        for(int j=0; j<stride; j++)
        {
            sum += inputData->data()[i*stride+j];
        }
        // BUG FIX: guard a zero-sized reduced extent (stride == 0 would
        // be an integer division by zero, which is UB).
        outputData->data()[i] = (stride > 0) ? (sum / stride) : 0.0f;
    }
    output[0] = result;
}
// Factory hook used by the layer registry: hands back a heap-allocated
// Reduction through `layer`. Always reports success (0); the caller
// takes ownership of the new object.
int Reduction::CreateInstance(Layer* &layer)
{
    Reduction* reduction = new Reduction();
    layer = reduction;
    return 0;
}
// Registers this layer under the type name "Reduction_t" so the model
// loader can construct Reduction instances via CreateInstance.
LayerRegistererWrapper reductionCreateInstance("Reduction_t", Reduction::CreateInstance);