17 #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h> 21 using namespace armcomputetensorutils;
// NeonBatchNormalizationValidate (excerpt): converts each ArmNN TensorInfo
// (input, output, mean, var, beta, gamma) into an ACL arm_compute::TensorInfo
// using the descriptor's data layout, then defers the actual validation to
// arm_compute::NEBatchNormalizationLayer::validate.
// NOTE(review): this is a listing fragment — original source lines 45-46, 48
// and the tail of the validate(...) call (line 50 onward) are not shown here.
// The leading numbers ("33", "34", ...) are listing line numbers, not code.
33 const arm_compute::TensorInfo aclInputInfo =
34 armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.
m_DataLayout);
35 const arm_compute::TensorInfo aclOutputInfo =
36 armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.
m_DataLayout);
// Mean/variance/beta/gamma are the per-channel batch-norm parameter tensors;
// all are converted with the same m_DataLayout so shapes agree with input.
37 const arm_compute::TensorInfo aclMeanInfo =
38 armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.
m_DataLayout);
39 const arm_compute::TensorInfo aclVarInfo =
40 armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.
m_DataLayout);
41 const arm_compute::TensorInfo aclBetaInfo =
42 armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.
m_DataLayout);
43 const arm_compute::TensorInfo aclGammaInfo =
44 armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.
m_DataLayout);
// Tail of a call whose head (lines 45-46) is elided — presumably
// ConvertActivationDescriptorToAclActivationLayerInfo(activationDescriptor),
// producing the optional fused-activation info. TODO(review): confirm against
// the full source.
47 activationDescriptor);
// Delegates to ACL; the remaining arguments (&aclOutputInfo, &aclMeanInfo,
// &aclVarInfo, &aclBetaInfo, &aclGammaInfo, epsilon, activation info) continue
// on elided lines.
49 return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
// NeonBatchNormalizationWorkload constructor (excerpt): wires the queue
// descriptor's input/output handles to ACL tensors, applies the requested data
// layout, allocates staging tensors for the batch-norm parameters, and
// configures the ACL NEBatchNormalizationLayer.
// NOTE(review): listing fragment — original lines 67-68, 71, 73-74, 76-77,
// 79-80, 82-85 and 88-94 are elided; the leading numbers are listing line
// numbers, not code.
// Downcast the ArmNN tensor handles to ACL-backed handles to reach the raw
// arm_compute::ITensor objects (workload expects exactly one input/output).
65 arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(
m_Data.
m_Inputs[0])->GetTensor();
66 arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(
m_Data.
m_Outputs[0])->GetTensor();
// aclDataLayout is defined on an elided line — presumably converted from
// m_Data.m_Parameters.m_DataLayout (NCHW/NHWC); TODO(review): confirm.
69 input.info()->set_data_layout(aclDataLayout);
70 output.info()->set_data_layout(aclDataLayout);
// Allocate owning ACL tensors for the four per-channel parameters; the
// BuildArmComputeTensor calls that size them are on elided lines.
72 m_Mean = std::make_unique<arm_compute::Tensor>();
75 m_Variance = std::make_unique<arm_compute::Tensor>();
78 m_Gamma = std::make_unique<arm_compute::Tensor>();
81 m_Beta = std::make_unique<arm_compute::Tensor>();
// Configure the ACL layer; the remaining configure(...) arguments (output,
// mean, variance, beta, gamma, epsilon, activation info) are on elided lines.
86 auto layer = std::make_unique<arm_compute::NEBatchNormalizationLayer>();
87 layer->configure(&input,
// Transfer ownership of the configured layer into the workload member.
95 m_Layer.reset(layer.release());
// Releases the staging parameter tensors (mean, variance, gamma, beta) once
// they are no longer needed by the workload. FreeTensorIfUnused is defined
// elsewhere in the project — presumably it frees the tensor only if ACL holds
// no further reference to it; TODO(review): confirm its exact semantics.
// NOTE(review): the surrounding braces (listing lines 115/120) are elided from
// this excerpt; the leading numbers are listing line numbers, not code.
114 void NeonBatchNormalizationWorkload::FreeUnusedTensors()
116 FreeTensorIfUnused(m_Mean);
117 FreeTensorIfUnused(m_Variance);
118 FreeTensorIfUnused(m_Gamma);
119 FreeTensorIfUnused(m_Beta);
const ConstCpuTensorHandle * m_Gamma
const ConstCpuTensorHandle * m_Beta
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
const BatchNormalizationQueueDescriptor m_Data
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
const ConstCpuTensorHandle * m_Mean
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const ConstCpuTensorHandle * m_Variance
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2021 ARM Limited and Contributors.
LayerDescriptor m_Parameters
An ActivationDescriptor for the ActivationLayer.
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, const ConstCpuTensorHandle *handle)
virtual void Execute() const override
NeonBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo() const
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor &actDesc)