@@ -10,36 +10,36 @@ namespace dl
    namespace layer
    {
        /**
-        * @brief PReLU(input).
+        * @brief PRelu(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
-       class PReLU : public Layer
+       class PRelu : public Layer
        {
        private:
-           feature_t *activation_element;        /*<! quantized alpha elements along channel axis >*/
+           const feature_t *activation_element;  /*<! quantized alpha elements along channel axis >*/
            int activation_exponent;              /*<! exponent of quantized alpha elements >*/
            Tensor<feature_t> *output;            /*<! output ptr of prelu >*/
            bool inplace;                         /*<! true: the output will store to input0
                                                       false: the output will store to a separate memory >*/
            std::vector<int> output_shape;        /*<! output shape of prelu >*/
        public:
            /**
-            * @brief Construct a new PReLU object
+            * @brief Construct a new PRelu object
             *
             * @param activation_element  quantized alpha elements along channel axis
             * @param activation_exponent exponent of quantized alpha elements
             * @param name                name of prelu
             * @param inplace             true: the output will store to input0
             *                            false: the output will store to a separate memory
             */
-           PReLU(const feature_t *activation_element,
+           PRelu(const feature_t *activation_element,
                  const int activation_exponent = 0,
-                 const char *name = NULL,
-                 bool inplace = "PReLU") : Layer(name),
+                 const char *name = "PRelu",
+                 bool inplace = false) : Layer(name),
                                          activation_element(activation_element),
                                          activation_exponent(activation_exponent),
                                          output(NULL),
@@ -49,10 +49,10 @@ namespace dl
            }

            /**
-            * @brief Destroy the PReLU object
+            * @brief Destroy the PRelu object
             *
             */
-           ~PReLU()
+           ~PRelu()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
@@ -71,7 +71,7 @@ namespace dl
                this->output_shape = input.shape;
                if (!this->inplace)
                {
-                   if (this->output != NULL)
+                   if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
@@ -94,19 +94,19 @@ namespace dl
            /**
             * @brief Get the output
             *
-            * @return Tensor<feature_t>& PReLU result
+            * @return Tensor<feature_t>& PRelu result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
-            * @brief Call PReLU operation.
+            * @brief Call PRelu operation.
             *
             * @param input       as an input
             * @param assign_core not effective yet
-            * @return PReLU result
+            * @return PRelu result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
            {
@@ -125,7 +125,7 @@ namespace dl

                    DL_LOG_LAYER_LATENCY_START();
                    nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
-                   DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
+                   DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
                }
                else
                {
@@ -135,7 +135,7 @@ namespace dl
                        this->output->set_shape(this->output_shape);
                    }
                    nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
-                   DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
+                   DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
                }

                return *this->output;
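For context, a minimal usage sketch of the renamed PRelu layer follows. It is not part of this commit: the include paths, the build() step, the channel count, and the alpha/exponent values are illustrative assumptions based only on the constructor and call() signatures visible in the diff above.

#include "dl_layer_prelu.hpp" // assumed header name for this class
#include "dl_variable.hpp"    // assumed home of dl::Tensor

using namespace dl;

// Hypothetical per-channel alpha table for a 3-channel int8_t feature map,
// quantized with exponent -7, i.e. alpha_float = element * 2^-7.
static const int8_t prelu_alpha[3] = {13, 26, 6};

// After this change, name defaults to "PRelu" and inplace defaults to false.
static layer::PRelu<int8_t> prelu(prelu_alpha, -7);

Tensor<int8_t> &apply_prelu(Tensor<int8_t> &feature_map)
{
    // build() is assumed from the -71,7 hunk, whose body copies the input shape
    // to output_shape; its exact signature is not shown in this diff.
    prelu.build(feature_map);
    // Per channel: y = x for x >= 0, y = alpha[c] * x for x < 0, in the quantized domain.
    return prelu.call(feature_map);
}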