25 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
32 const std::vector<float>& inputValues,
33 const std::vector<float>& expectedOutputValues,
39 std::vector<T> inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
40 std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
43 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
44 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
48 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
49 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
51 std::unique_ptr<armnn::IWorkload> workload
54 inputHandle->Allocate();
55 outputHandle->Allocate();
65 outputHandle->GetShape(),
69 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
86 std::vector<float> inputValues
109 std::vector<float> expectedOutputValues
121 0.99995005f, -0.7337929f,
123 -0.99995005f, 0.52413774f,
126 -0.99995005f, -1.1531031f,
128 0.99995005f, 1.3627582f
143 return InstanceNormTestImpl<ArmnnType>(
150 expectedOutputValues,
154 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
171 std::vector<float> inputValues
194 std::vector<float> expectedOutputValues
229 return InstanceNormTestImpl<ArmnnType>(
236 expectedOutputValues,
248 return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
257 return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
266 return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
275 return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
void PermuteTensorNhwcToNchw(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)
const TensorShape & GetShape() const
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
LayerTestResult< float, 4 > InstanceNormFloat32Test2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
LayerTestResult< armnn::Half, 4 > InstanceNormFloat16Test2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout dataLayout)
LayerDescriptor m_Parameters
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< armnn::Half, 4 > InstanceNormFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout dataLayout)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0. ...
LayerTestResult< float, 4 > InstanceNormFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout dataLayout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const