13 #include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h> 17 using namespace armcomputetensorutils;
// Interior of NeonBatchNormalizationValidate (see signature fragment below):
// converts every batch-norm operand (input, output, mean, variance, beta,
// gamma) from armnn::TensorInfo to arm_compute::TensorInfo, honouring the
// descriptor's data layout, then defers to ACL's own validate().
// NOTE(review): this is a garbled extraction — the original source line
// numbers ("28", "29", ...) are fused into the statements, the function
// header is not visible here, and the final validate(...) call is truncated
// mid-argument-list; confirm against the full file before editing.
28 const arm_compute::TensorInfo aclInputInfo =
29 armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.
m_DataLayout);
30 const arm_compute::TensorInfo aclOutputInfo =
31 armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.
m_DataLayout);
32 const arm_compute::TensorInfo aclMeanInfo =
33 armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.
m_DataLayout);
34 const arm_compute::TensorInfo aclVarInfo =
35 armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.
m_DataLayout);
36 const arm_compute::TensorInfo aclBetaInfo =
37 armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.
m_DataLayout);
38 const arm_compute::TensorInfo aclGammaInfo =
39 armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.
m_DataLayout);
// Delegate the actual shape/type checking to Arm Compute Library.
// (Remaining arguments — the other five TensorInfos and epsilon — are cut
// off in this extraction.)
41 return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
// Interior of the NeonBatchNormalizationWorkload constructor: obtains the
// ACL input/output tensors from the queue descriptor's handles, propagates
// the requested data layout onto them, allocates owned ACL tensors for the
// four batch-norm parameters (mean, variance, gamma, beta), then configures
// an NEBatchNormalizationLayer and takes ownership of it in m_Layer.
// NOTE(review): garbled extraction — original line numbers are fused into
// the statements and several lines (e.g. 58-59, 62, 64-74, 77-82, which
// presumably build the parameter TensorInfos and the configure() argument
// list) are missing from this view; verify against the full file.
56 arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(
m_Data.
m_Inputs[0])->GetTensor();
57 arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(
m_Data.
m_Outputs[0])->GetTensor();
// Apply the workload's data layout (aclDataLayout is computed on a line not
// visible in this extraction).
60 input.info()->set_data_layout(aclDataLayout);
61 output.info()->set_data_layout(aclDataLayout);
// Owned ACL tensors for the constant batch-norm parameters; their
// initialisation calls fall in the missing lines.
63 m_Mean = std::make_unique<arm_compute::Tensor>();
66 m_Variance = std::make_unique<arm_compute::Tensor>();
69 m_Gamma = std::make_unique<arm_compute::Tensor>();
72 m_Beta = std::make_unique<arm_compute::Tensor>();
// Configure the ACL layer (remaining configure() arguments truncated), then
// transfer ownership into the m_Layer member.
75 auto layer = std::make_unique<arm_compute::NEBatchNormalizationLayer>();
76 layer->configure(&input,
83 m_Layer.reset(layer.release());
// Releases each of the four owned parameter tensors via FreeTensorIfUnused.
// Presumably invoked once the configured ACL layer no longer needs the
// parameter data — confirm against the constructor in the full file.
// NOTE(review): the braces of this function body are missing from this
// garbled extraction; the embedded numbers are original source line numbers.
102 void NeonBatchNormalizationWorkload::FreeUnusedTensors()
104     FreeTensorIfUnused(m_Mean);
105     FreeTensorIfUnused(m_Variance);
106     FreeTensorIfUnused(m_Gamma);
107     FreeTensorIfUnused(m_Beta);
const ConstCpuTensorHandle * m_Gamma
const ConstCpuTensorHandle * m_Beta
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor)
const BatchNormalizationQueueDescriptor m_Data
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
const ConstCpuTensorHandle * m_Mean
float m_Eps
Value added to the variance; used to avoid dividing by zero.
DataLayout m_DataLayout
The data layout to be used (NCHW or NHWC).
const ConstCpuTensorHandle * m_Variance
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2020 ARM Limited.
LayerDescriptor m_Parameters
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, const ConstCpuTensorHandle *handle)
virtual void Execute() const override
NeonBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo() const
A BatchNormalizationDescriptor for the BatchNormalizationLayer.