aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-10-04 13:10:16 +0100
committerÁron Virginás-Tar <aron.virginas-tar@arm.com>2019-10-09 10:43:00 +0000
commit8168f407f0f2715250f388089f26ed39683ac00a (patch)
treefeae8f29b6910aba496141c5672a2fdac0b8efe5 /src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
parent784db773ec0fb32562e8889b769bc04450159161 (diff)
downloadarmnn-8168f407f0f2715250f388089f26ed39683ac00a.tar.gz
IVGCVSW-3889 Add CL workload for INSTANCE_NORMALIZATION
!android-nn-driver:2039 Signed-off-by: Kevin May <kevin.may@arm.com> Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> Change-Id: I621dd80920b58b8b795ed13917b88850519c8177
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp')
-rw-r--r--src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp267
1 file changed, 267 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
new file mode 100644
index 0000000000..4e9cbbf40d
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -0,0 +1,267 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "InstanceNormalizationTestImpl.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> InstanceNormTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const std::vector<float>& inputValues,
+ const std::vector<float>& expectedOutputValues,
+ armnn::InstanceNormalizationQueueDescriptor descriptor,
+ float qScale = 0.0f,
+ int32_t qOffset = 0)
+{
+ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+
+ result.outputExpected =
+ MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::WorkloadInfo info;
+
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+ return result;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> InstanceNormTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout)
+{
+ // BatchSize: 2
+ // Height: 2
+ // Width: 2
+ // Channels: 2
+
+ const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
+
+ armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width 0 x Channel (2)
+ 0.f, 1.f,
+ // Batch 0, Height 0, Width 1 x Channel (2)
+ 0.f, 2.f,
+
+ // Batch 0, Height 1, Width 0 x Channel (2)
+ 0.f, 2.f,
+ // Batch 0, Height 1, Width 1 x Channel (2)
+ 0.f, 4.f,
+
+ // Batch 1, Height 0, Width 0 x Channel (2)
+ 1.f, -1.f,
+ // Batch 1, Height 0, Width 1 x Channel (2)
+ -1.f, 2.f,
+
+ // Batch 1, Height 1, Width 0 x Channel (2)
+ -1.f, -2.f,
+ // Batch 1, Height 1, Width 1 x Channel (2)
+ 1.f, 4.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width 0 x Channel (2)
+ 0.f, -1.1470304f,
+ // Batch 0, Height 0, Width 1 x Channel (2)
+ 0.f, -0.22940612f,
+ // Batch 0, Height 1, Width 0 x Channel (2)
+ 0.f, -0.22940612f,
+ // Batch 0, Height 1, Width 1 x Channel (2)
+ 0.f, 1.6058424f,
+
+ // Batch 1, Height 0, Width 0 x Channel (2)
+ 0.99995005f, -0.7337929f,
+ // Batch 1, Height 0, Width 1 x Channel (2)
+ -0.99995005f, 0.52413774f,
+
+ // Batch 1, Height 1, Width 0 x Channel (2)
+ -0.99995005f, -1.1531031f,
+ // Batch 1, Height 1, Width 1 x Channel (2)
+ 0.99995005f, 1.3627582f
+ };
+
+ if (dataLayout == armnn::DataLayout::NCHW)
+ {
+ PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
+ PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
+ }
+
+ armnn::InstanceNormalizationQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Eps = 0.0001f;
+ descriptor.m_Parameters.m_Beta = 0.0f;
+ descriptor.m_Parameters.m_Gamma = 1.0f;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+
+ return InstanceNormTestImpl<ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ descriptor);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> InstanceNormTest2(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout)
+{
+ // BatchSize: 2
+ // Height: 2
+ // Width: 2
+ // Channels: 2
+
+ const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
+
+ armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width 0 x Channel (2)
+ 0.f, 1.f,
+ // Batch 0, Height 0, Width 1 x Channel (2)
+ 0.f, 2.f,
+
+ // Batch 0, Height 1, Width 0 x Channel (2)
+ 0.f, 2.f,
+ // Batch 0, Height 1, Width 1 x Channel (2)
+ 0.f, 4.f,
+
+ // Batch 1, Height 0, Width 0 x Channel (2)
+ 1.f, -1.f,
+ // Batch 1, Height 0, Width 1 x Channel (2)
+ -1.f, 2.f,
+
+ // Batch 1, Height 1, Width 0 x Channel (2)
+ -1.f, -2.f,
+ // Batch 1, Height 1, Width 1 x Channel (2)
+ 1.f, 4.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width 0 x Channel (2)
+ 10.f, 7.7059393f,
+ // Batch 0, Height 0, Width 1 x Channel (2)
+ 10.f, 9.541187f,
+
+ // Batch 0, Height 1, Width 0 x Channel (2)
+ 10.f, 9.541187f,
+ // Batch 0, Height 1, Width 1 x Channel (2)
+ 10.f, 13.211685f,
+
+ // Batch 1, Height 0, Width 0 x Channel (2)
+ 11.9999f, 8.532414f,
+ // Batch 1, Height 0, Width 1 x Channel (2)
+ 8.0001f, 11.048275f,
+
+ // Batch 1, Height 1, Width 0 x Channel (2)
+ 8.0001f, 7.693794f,
+ // Batch 1, Height 1, Width 1 x Channel (2)
+ 11.9999f, 12.725516f
+ };
+
+ if (dataLayout == armnn::DataLayout::NCHW)
+ {
+ PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
+ PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
+ }
+
+ armnn::InstanceNormalizationQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Eps = 0.0001f;
+ descriptor.m_Parameters.m_Beta = 10.0f;
+ descriptor.m_Parameters.m_Gamma = 2.0f;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
+
+ return InstanceNormTestImpl<ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ descriptor);
+}
+
+} // anonymous namespace
+
+LayerTestResult<float, 4> InstanceNormFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout)
+{
+ return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
+}
+
+LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout)
+{
+ return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
+}
+
+LayerTestResult<float, 4> InstanceNormFloat32Test2(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout)
+{
+ return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
+}
+
+LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout)
+{
+ return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
+}