Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp | 74
1 file changed, 74 insertions(+), 0 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
new file mode 100644
index 0000000000..1ce9d2dac7
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FakeQuantizationTestImpl.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
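+// Runs a FakeQuantization workload over a small 2-D tensor and returns the
+// actual and expected results so the calling test can compare them.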
+LayerTestResult<float, 2> FakeQuantizationTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ constexpr unsigned int width = 2;
+ constexpr unsigned int height = 3;
+
+    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
+
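+    // The input deliberately covers the whole [min, max] range used below,
+    // including the bounds themselves.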
+ auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+ -10.0f, -5.0f,
+ 0.0f, 5.0f,
+ 10.0f, 10.0f
+ }));
+
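+    // LayerTestResult carries both the actual output and the expected output
+    // for the caller to compare.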
+ LayerTestResult<float, 2> ret(tensorInfo);
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
+
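+    // Describe the workload: a single input and a single output, both with
+    // the same shape and data type.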
+ armnn::FakeQuantizationQueueDescriptor data;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
+
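+    // Quantization range: input values are (fake-)quantized as if the
+    // [min, max] interval were mapped onto [0, 255].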
+ float min = -10.f;
+ float max = 10.f;
+
+ data.m_Parameters.m_Min = min;
+ data.m_Parameters.m_Max = max;
+
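+    // Wrap the expected-output buffer in a pass-through handle and wire it up
+    // as the output of a copied (reference) descriptor. Note that no reference
+    // workload is actually created from refData in this test.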
+ armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+ armnn::FakeQuantizationQueueDescriptor refData = data;
+ armnn::WorkloadInfo refInfo = info;
+ SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
+
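+    // Ask the backend's workload factory for a FakeQuantization workload.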
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
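+    // Copy the host-side input data into the backend's input tensor.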
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+
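+    // PostAllocationConfigure() must be called after the handles have been
+    // allocated and before Execute().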
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
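+    // Expected results: with min = -10 and max = 10 the range maps onto
+    // [0, 255], so -10 -> 0, 0 -> ~128, and anything at or above 10
+    // saturates at 255.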
+ ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+ 0.0f, 63.0f,
+ 128.0f, 191.0f,
+ 255.0f, 255.0f
+ }));
+
+ return ret;
+}