aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
diff options
context:
space:
mode:
Diffstat (limited to 'arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h')
-rw-r--r--  arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h  36
1 file changed, 21 insertions, 15 deletions
diff --git a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
index 4f8a8da1fb..07a2cdd8b8 100644
--- a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
@@ -67,15 +67,18 @@ public:
* @param[in] fused_act Activation layer information in case of a fused activation.
*
*/
- void configure(TensorType *input,
- TensorType *weights,
- TensorType *bias,
- TensorType *output,
- const TensorType *mean,
- const TensorType *var,
- const TensorType *beta,
- const TensorType *gamma,
- float epsilon, const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo const &fused_act)
+ void configure(TensorType *input,
+ TensorType *weights,
+ TensorType *bias,
+ TensorType *output,
+ const TensorType *mean,
+ const TensorType *var,
+ const TensorType *beta,
+ const TensorType *gamma,
+ float epsilon,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ ActivationLayerInfo const &fused_act)
{
// We don't run any validate, as we assume that the layers have been already validated
const bool has_bias = (bias != nullptr);
@@ -83,20 +86,23 @@ public:
// We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
// as batch normalization might end up with a bias != 0
- if(has_bias)
+ if (has_bias)
{
- _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+ _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon,
+ FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
bias_to_use = bias;
}
else
{
- _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+ _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon,
+ FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
bias_to_use = &_fused_bias;
}
- _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier, fused_act.enabled() ? fused_act : ActivationLayerInfo());
+ _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier,
+ fused_act.enabled() ? fused_act : ActivationLayerInfo());
- if(!has_bias)
+ if (!has_bias)
{
_fused_bias.allocator()->allocate();
}
@@ -111,7 +117,7 @@ public:
void prepare()
{
- if(!_is_prepared)
+ if (!_is_prepared)
{
_fused_batch_norm_layer.run();
_is_prepared = true;