author     Matthew Jackson <matthew.jackson@arm.com>    2019-09-09 14:31:21 +0100
committer  Matthew Jackson <matthew.jackson@arm.com>    2019-09-10 09:46:49 +0000
commit     e69c399dcee1e75ebf9b2b12f72f3ad628c4e104 (patch)
tree       caa3c3739723483b5db8c19872b6af13cac74db5 /src
parent     914e4db5a9083e922d89f133672fd44e92016e96 (diff)
IVGCVSW-3824 Implement Float 16 Encoder and Decoder
* Implement Float 16 Encoder and Decoder
* Add Stack Float 16 layer and create workload tests

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Ice4678226f4d22c06ebcc6db3052d42ce0c1bd67
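For reference, the new iterators convert between 32-bit and 16-bit floats through armnnUtils::FloatingPointConverter, as the BaseIterator.hpp hunk further down shows. A minimal round-trip sketch, not part of the patch; the include paths and the exact header providing armnn::Half are assumptions:

#include <Half.hpp>                    // assumed header providing armnn::Half
#include <FloatingPointConverter.hpp>  // armnnUtils::FloatingPointConverter

// Round-trip a single value through the same converter calls that
// Float16Encoder::Set and Float16Decoder::Get use in this patch.
float RoundTripThroughHalf(float input)
{
    armnn::Half storage;
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(&input, 1, &storage);

    float output = 0.0f;
    armnnUtils::FloatingPointConverter::ConvertFloat16To32(&storage, 1, &output);
    return output; // equals input for values exactly representable in IEEE half precision
}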
Diffstat (limited to 'src')
-rw-r--r--  src/backends/backendsCommon/common.mk                             1
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                   1
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp   611
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp   482
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp                    5
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                            13
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp                7
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                        12
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                        3
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                     4
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                    13
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp                57
-rw-r--r--  src/backends/reference/workloads/Decoders.hpp                    15
-rw-r--r--  src/backends/reference/workloads/Encoders.hpp                     6
14 files changed, 726 insertions, 504 deletions
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index f75870adb7..39e026518f 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -66,6 +66,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/SpaceToBatchNdTestImpl.cpp \
test/layerTests/SpaceToDepthTestImpl.cpp \
test/layerTests/SplitterTestImpl.cpp \
+ test/layerTests/StackTestImpl.cpp \
test/layerTests/StridedSliceTestImpl.cpp \
test/layerTests/SubtractionTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 49604b0d3b..e46d48145a 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -113,6 +113,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/SpaceToDepthTestImpl.hpp
layerTests/SplitterTestImpl.cpp
layerTests/SplitterTestImpl.hpp
+ layerTests/StackTestImpl.cpp
layerTests/StackTestImpl.hpp
layerTests/StridedSliceTestImpl.cpp
layerTests/StridedSliceTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
new file mode 100644
index 0000000000..80058c6ea5
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -0,0 +1,611 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StackTestImpl.hpp"
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType, typename T, std::size_t outputDimLength>
+LayerTestResult<T, outputDimLength> StackTestHelper(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ unsigned int axis,
+ const std::vector<std::vector<T>>& inputData,
+ const std::vector<T>& outputExpectedData)
+{
+ unsigned int numInputs = static_cast<unsigned int>(inputData.size());
+ std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ inputs.push_back(MakeTensor<T, outputDimLength-1>(inputTensorInfo, inputData[i]));
+ }
+
+ LayerTestResult<T, outputDimLength> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, outputDimLength>(outputTensorInfo, outputExpectedData);
+
+ std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ inputHandles.push_back(workloadFactory.CreateTensorHandle(inputTensorInfo));
+ }
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::StackQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Axis = axis;
+ descriptor.m_Parameters.m_InputShape = inputTensorInfo.GetShape();
+ descriptor.m_Parameters.m_NumInputs = numInputs;
+
+ armnn::WorkloadInfo info;
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ std::unique_ptr<armnn::ITensorHandle>& inputHandle = inputHandles[i];
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ inputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), inputs[i].origin());
+ }
+
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+ outputHandle->Allocate();
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateStack(descriptor, info);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+
+ return result;
+}
+
+} // anonymous namespace
+
+//
+// Implementation templates
+//
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StackAxis0TestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 2, 3 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18
+ });
+
+ inputData.push_back(
+ {
+ 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18,
+
+
+ 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36
+ };
+
+ return StackTestHelper<ArmnnType, T, 4>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 0U,
+ inputData,
+ outputExpectedData
+ );
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StackOutput4DAxis1TestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 3, 2, 2, 3 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18
+ });
+
+ inputData.push_back(
+ {
+ 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 19, 20, 21,
+ 22, 23, 24,
+
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+
+ 13, 14, 15,
+ 16, 17, 18,
+
+ 31, 32, 33,
+ 34, 35, 36
+ };
+
+ return StackTestHelper<ArmnnType, T, 4>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 1U,
+ inputData,
+ outputExpectedData
+ );
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StackOutput4DAxis2TestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 3, 2, 2, 3 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18
+ });
+
+ inputData.push_back(
+ {
+ 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 2, 3,
+ 19, 20, 21,
+
+ 4, 5, 6,
+ 22, 23, 24,
+
+ 7, 8, 9,
+ 25, 26, 27,
+
+ 10, 11, 12,
+ 28, 29, 30,
+
+ 13, 14, 15,
+ 31, 32, 33,
+
+ 16, 17, 18,
+ 34, 35, 36
+ };
+
+ return StackTestHelper<ArmnnType, T, 4>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 2U,
+ inputData,
+ outputExpectedData
+ );
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StackOutput4DAxis3TestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 3, 2, 3, 2 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+ 13, 14, 15,
+ 16, 17, 18
+ });
+
+ inputData.push_back(
+ {
+ 19, 20, 21,
+ 22, 23, 24,
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 19,
+ 2, 20,
+ 3, 21,
+
+ 4, 22,
+ 5, 23,
+ 6, 24,
+
+
+ 7, 25,
+ 8, 26,
+ 9, 27,
+
+ 10, 28,
+ 11, 29,
+ 12, 30,
+
+
+ 13, 31,
+ 14, 32,
+ 15, 33,
+
+ 16, 34,
+ 17, 35,
+ 18, 36
+ };
+
+ return StackTestHelper<ArmnnType, T, 4>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 3U,
+ inputData,
+ outputExpectedData
+ );
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StackOutput3DInputs3TestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 3, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 3 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ });
+
+ inputData.push_back(
+ {
+ 10, 11, 12,
+ 13, 14, 15,
+ 16, 17, 18
+ });
+
+ inputData.push_back(
+ {
+ 19, 20, 21,
+ 22, 23, 24,
+ 25, 26, 27
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 2, 3,
+ 10, 11, 12,
+ 19, 20, 21,
+
+ 4, 5, 6,
+ 13, 14, 15,
+ 22, 23, 24,
+
+ 7, 8, 9,
+ 16, 17, 18,
+ 25, 26, 27
+ };
+
+ return StackTestHelper<ArmnnType, T, 3>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 1U,
+ inputData,
+ outputExpectedData
+ );
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> StackOutput5DTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 2, 2, 2, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 2, 2, 2, 2, 3 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+
+ 13, 14, 15,
+ 16, 17, 18,
+
+ 19, 20, 21,
+ 22, 23, 24
+ });
+
+ inputData.push_back(
+ {
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36,
+
+
+ 37, 38, 39,
+ 40, 41, 42,
+
+ 43, 44, 45,
+ 46, 47, 48
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36,
+
+
+
+ 13, 14, 15,
+ 16, 17, 18,
+
+ 19, 20, 21,
+ 22, 23, 24,
+
+
+ 37, 38, 39,
+ 40, 41, 42,
+
+ 43, 44, 45,
+ 46, 47, 48
+
+ };
+
+ return StackTestHelper<ArmnnType, T, 5>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 1U,
+ inputData,
+ outputExpectedData
+ );
+}
+
+//
+// Implementation functions
+//
+
+LayerTestResult<float, 4> StackAxis0Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return StackAxis0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 4> StackOutput4DAxis1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return StackOutput4DAxis1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 4> StackOutput4DAxis2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return StackOutput4DAxis2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 4> StackOutput4DAxis3Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return StackOutput4DAxis3TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 3> StackOutput3DInputs3Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return StackOutput3DInputs3TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 5> StackOutput5DFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return StackOutput5DTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<armnn::Half, 4> StackFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ using namespace half_float::literal;
+
+ armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, armnn::DataType::Float16);
+ armnn::TensorInfo outputTensorInfo({ 3, 2, 2, 3 }, armnn::DataType::Float16);
+
+ std::vector<std::vector<armnn::Half>> inputData;
+
+ inputData.push_back(
+ {
+ 1.0_h, 2.0_h, 3.0_h,
+ 4.0_h, 5.0_h, 6.0_h,
+
+ 7.0_h, 8.0_h, 9.0_h,
+ 10.0_h, 11.0_h, 12.0_h,
+
+ 13.0_h, 14.0_h, 15.0_h,
+ 16.0_h, 17.0_h, 18.0_h
+ });
+
+ inputData.push_back(
+ {
+ 19.0_h, 20.0_h, 21.0_h,
+ 22.0_h, 23.0_h, 24.0_h,
+
+ 25.0_h, 26.0_h, 27.0_h,
+ 28.0_h, 29.0_h, 30.0_h,
+
+ 31.0_h, 32.0_h, 33.0_h,
+ 34.0_h, 35.0_h, 36.0_h
+ });
+
+ std::vector<armnn::Half> outputExpectedData =
+ {
+ 1.0_h, 2.0_h, 3.0_h,
+ 19.0_h, 20.0_h, 21.0_h,
+
+ 4.0_h, 5.0_h, 6.0_h,
+ 22.0_h, 23.0_h, 24.0_h,
+
+ 7.0_h, 8.0_h, 9.0_h,
+ 25.0_h, 26.0_h, 27.0_h,
+
+ 10.0_h, 11.0_h, 12.0_h,
+ 28.0_h, 29.0_h, 30.0_h,
+
+ 13.0_h, 14.0_h, 15.0_h,
+ 31.0_h, 32.0_h, 33.0_h,
+
+ 16.0_h, 17.0_h, 18.0_h,
+ 34.0_h, 35.0_h, 36.0_h
+ };
+
+ return StackTestHelper<armnn::DataType::Float16, armnn::Half, 4>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 2U,
+ inputData,
+ outputExpectedData
+ );
+}
\ No newline at end of file
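The Float16 test above builds its tensors with the half_float user-defined literal. A small sketch of that pattern, assuming armnn::Half aliases half_float::half as the literal usage in the test implies:

#include <vector>
#include <Half.hpp>  // assumed header providing armnn::Half

void BuildHalfData()
{
    using namespace half_float::literal;

    // _h literals come from the half_float library; plain floats need a narrowing cast.
    std::vector<armnn::Half> halves = { 1.0_h, 2.0_h, 3.0_h };
    halves.push_back(static_cast<armnn::Half>(4.5f));
}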
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp
index f063fbb737..a2eb3a12cc 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -14,482 +14,30 @@
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/test/TensorCopyUtils.hpp>
-#include <backendsCommon/test/WorkloadTestUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-namespace
-{
-
-template<armnn::DataType ArmnnType, typename T, std::size_t outputDimLength>
-LayerTestResult<T, outputDimLength> StackTestHelper(
+LayerTestResult<float, 4> StackAxis0Float32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::TensorInfo& inputTensorInfo,
- const armnn::TensorInfo& outputTensorInfo,
- unsigned int axis,
- const std::vector<std::vector<T>>& inputData,
- const std::vector<T>& outputExpectedData)
-{
- unsigned int numInputs = static_cast<unsigned int>(inputData.size());
- std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- inputs.push_back(MakeTensor<T, outputDimLength-1>(inputTensorInfo, inputData[i]));
- }
-
- LayerTestResult<T, outputDimLength> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, outputDimLength>(outputTensorInfo, outputExpectedData);
-
- std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- inputHandles.push_back(workloadFactory.CreateTensorHandle(inputTensorInfo));
- }
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::StackQueueDescriptor descriptor;
- descriptor.m_Parameters.m_Axis = axis;
- descriptor.m_Parameters.m_InputShape = inputTensorInfo.GetShape();
- descriptor.m_Parameters.m_NumInputs = numInputs;
-
- armnn::WorkloadInfo info;
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- std::unique_ptr<armnn::ITensorHandle>& inputHandle = inputHandles[i];
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- inputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputs[i].origin());
- }
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- outputHandle->Allocate();
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateStack(descriptor, info);
-
- workload->Execute();
-
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
-
- return result;
-}
-
-} // anonymous namespace
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Stack0AxisTest(
+LayerTestResult<float, 4> StackOutput4DAxis1Float32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ 2, 3, 2, 3 }, ArmnnType);
-
- std::vector<std::vector<T>> inputData;
-
- inputData.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputData.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> outputExpectedData =
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18,
-
-
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- };
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
- return StackTestHelper<ArmnnType, T, 4>(
- workloadFactory,
- memoryManager,
- inputTensorInfo,
- outputTensorInfo,
- 0U,
- inputData,
- outputExpectedData
- );
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Stack4dOutput1AxisTest(
+LayerTestResult<float, 4> StackOutput4DAxis2Float32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ 3, 2, 2, 3 }, ArmnnType);
-
- std::vector<std::vector<T>> inputData;
-
- inputData.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputData.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> outputExpectedData =
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 19, 20, 21,
- 22, 23, 24,
-
-
- 7, 8, 9,
- 10, 11, 12,
-
- 25, 26, 27,
- 28, 29, 30,
-
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
- 13, 14, 15,
- 16, 17, 18,
-
- 31, 32, 33,
- 34, 35, 36
- };
-
- return StackTestHelper<ArmnnType, T, 4>(
- workloadFactory,
- memoryManager,
- inputTensorInfo,
- outputTensorInfo,
- 1U,
- inputData,
- outputExpectedData
- );
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Stack4dOutput2AxisTest(
+LayerTestResult<float, 4> StackOutput4DAxis3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ 3, 2, 2, 3 }, ArmnnType);
-
- std::vector<std::vector<T>> inputData;
-
- inputData.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputData.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> outputExpectedData =
- {
- 1, 2, 3,
- 19, 20, 21,
-
- 4, 5, 6,
- 22, 23, 24,
-
- 7, 8, 9,
- 25, 26, 27,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
- 10, 11, 12,
- 28, 29, 30,
-
- 13, 14, 15,
- 31, 32, 33,
-
- 16, 17, 18,
- 34, 35, 36
- };
-
- return StackTestHelper<ArmnnType, T, 4>(
- workloadFactory,
- memoryManager,
- inputTensorInfo,
- outputTensorInfo,
- 2U,
- inputData,
- outputExpectedData
- );
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Stack4dOutput3AxisTest(
+LayerTestResult<float, 3> StackOutput3DInputs3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo ({ 3, 2, 3 }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ 3, 2, 3, 2 }, ArmnnType);
-
- std::vector<std::vector<T>> inputData;
-
- inputData.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputData.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36
- });
-
- std::vector<T> outputExpectedData =
- {
- 1, 19,
- 2, 20,
- 3, 21,
-
- 4, 22,
- 5, 23,
- 6, 24,
-
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
- 7, 25,
- 8, 26,
- 9, 27,
-
- 10, 28,
- 11, 29,
- 12, 30,
-
-
- 13, 31,
- 14, 32,
- 15, 33,
-
- 16, 34,
- 17, 35,
- 18, 36
- };
-
- return StackTestHelper<ArmnnType, T, 4>(
- workloadFactory,
- memoryManager,
- inputTensorInfo,
- outputTensorInfo,
- 3U,
- inputData,
- outputExpectedData
- );
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Stack3dOutput1Axis3InputTest(
+LayerTestResult<float, 5> StackOutput5DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo ({ 3, 3 }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ 3, 3, 3 }, ArmnnType);
-
- std::vector<std::vector<T>> inputData;
-
- inputData.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
- 7, 8, 9
- });
-
- inputData.push_back(
- {
- 10, 11, 12,
- 13, 14, 15,
- 16, 17, 18
- });
-
- inputData.push_back(
- {
- 19, 20, 21,
- 22, 23, 24,
- 25, 26, 27
- });
-
- std::vector<T> outputExpectedData =
- {
- 1, 2, 3,
- 10, 11, 12,
- 19, 20, 21,
-
- 4, 5, 6,
- 13, 14, 15,
- 22, 23, 24,
-
- 7, 8, 9,
- 16, 17, 18,
- 25, 26, 27
- };
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
- return StackTestHelper<ArmnnType, T, 3>(
- workloadFactory,
- memoryManager,
- inputTensorInfo,
- outputTensorInfo,
- 1U,
- inputData,
- outputExpectedData
- );
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 5> Stack5dOutputTest(
+LayerTestResult<armnn::Half, 4> StackFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo ({ 2, 2, 2, 3 }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ 2, 2, 2, 2, 3 }, ArmnnType);
-
- std::vector<std::vector<T>> inputData;
-
- inputData.push_back(
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
-
- 13, 14, 15,
- 16, 17, 18,
-
- 19, 20, 21,
- 22, 23, 24
- });
-
- inputData.push_back(
- {
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36,
-
-
- 37, 38, 39,
- 40, 41, 42,
-
- 43, 44, 45,
- 46, 47, 48
- });
-
- std::vector<T> outputExpectedData =
- {
- 1, 2, 3,
- 4, 5, 6,
-
- 7, 8, 9,
- 10, 11, 12,
-
-
- 25, 26, 27,
- 28, 29, 30,
-
- 31, 32, 33,
- 34, 35, 36,
-
-
-
- 13, 14, 15,
- 16, 17, 18,
-
- 19, 20, 21,
- 22, 23, 24,
-
-
- 37, 38, 39,
- 40, 41, 42,
-
- 43, 44, 45,
- 46, 47, 48
-
- };
-
- return StackTestHelper<ArmnnType, T, 5>(
- workloadFactory,
- memoryManager,
- inputTensorInfo,
- outputTensorInfo,
- 1U,
- inputData,
- outputExpectedData
- );
-}
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 833918c727..bb6d041c7c 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -976,6 +976,11 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
ClCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
+BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+{
+ ClCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+}
+
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
ClCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 3f7b282649..92f8db74e3 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -495,12 +495,13 @@ ARMNN_AUTO_TEST_CASE(SpaceToDepthNhwcQSymm16, SpaceToDepthNhwcQSymm16Test)
ARMNN_AUTO_TEST_CASE(SpaceToDepthNchwQSymm16, SpaceToDepthNchwQSymm16Test)
// Stack
-ARMNN_AUTO_TEST_CASE(Stack0Axis, Stack0AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput1Axis, Stack4dOutput1AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput2Axis, Stack4dOutput2AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput3Axis, Stack4dOutput3AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack3dOutput1Axis3Input, Stack3dOutput1Axis3InputTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack5dOutput, Stack5dOutputTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack0Axis, StackAxis0Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis1, StackOutput4DAxis1Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis2, StackOutput4DAxis2Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis3, StackOutput4DAxis3Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput5D, StackOutput5DFloat32Test)
+ARMNN_AUTO_TEST_CASE(StackFloat16, StackFloat16Test)
// Strided Slice
ARMNN_AUTO_TEST_CASE(StridedSlice4dFloat32, StridedSlice4dFloat32Test)
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index e6e1574ae8..643ecd5a4f 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -875,6 +875,13 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
NeonCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+{
+ NeonCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+}
+#endif
+
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
NeonCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 5a0c966a6d..d9f78e8a5f 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -730,12 +730,12 @@ ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
// Stack
-ARMNN_AUTO_TEST_CASE(Stack0Axis, Stack0AxisTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput1Axis, Stack4dOutput1AxisTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput2Axis, Stack4dOutput2AxisTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput3Axis, Stack4dOutput3AxisTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack3dOutput1Axis3Input, Stack3dOutput1Axis3InputTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack5dOutput, Stack5dOutputTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack0Axis, StackAxis0Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis1, StackOutput4DAxis1Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis2, StackOutput4DAxis2Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis3, StackOutput4DAxis3Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput5D, StackOutput5DFloat32Test)
// TransposeConvolution2d
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 5c53f12eb1..5692f9e143 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1481,9 +1481,10 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index dc97356d57..f2dfb980b3 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -528,10 +528,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefStackWorkload>(descriptor, info);
}
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index af9f645a54..da036a6758 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1335,11 +1335,12 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nhwc,
DataLayout::NCHW)
// Stack
-ARMNN_AUTO_TEST_CASE(Stack0Axis, Stack0AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput1Axis, Stack4dOutput1AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput2Axis, Stack4dOutput2AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack4dOutput3Axis, Stack4dOutput3AxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack3dOutput1Axis3Input, Stack3dOutput1Axis3InputTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(Stack5dOutput, Stack5dOutputTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack0Axis, StackAxis0Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis1, StackOutput4DAxis1Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis2, StackOutput4DAxis2Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput4DAxis3, StackOutput4DAxis3Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
+ARMNN_AUTO_TEST_CASE(StackOutput5D, StackOutput5DFloat32Test)
+ARMNN_AUTO_TEST_CASE(StackFloat16, StackFloat16Test)
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index c9fd773d5e..18270faf46 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,6 +5,8 @@
#pragma once
+#include "FloatingPointConverter.hpp"
+
#include <armnn/ArmNN.hpp>
#include <ResolveType.hpp>
@@ -142,14 +144,31 @@ private:
const int32_t m_Offset;
};
-class FloatDecoder : public TypedIterator<const float, Decoder<float>>
+class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
{
public:
- FloatDecoder(const float* data)
+ Float16Decoder(const Half* data)
: TypedIterator(data) {}
- FloatDecoder()
- : FloatDecoder(nullptr) {}
+ Float16Decoder()
+ : Float16Decoder(nullptr) {}
+
+ float Get() const override
+ {
+ float val = 0.f;
+ armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
+ return val;
+ }
+};
+
+class Float32Decoder : public TypedIterator<const float, Decoder<float>>
+{
+public:
+ Float32Decoder(const float* data)
+ : TypedIterator(data) {}
+
+ Float32Decoder()
+ : Float32Decoder(nullptr) {}
float Get() const override
{
@@ -238,14 +257,36 @@ private:
const int32_t m_Offset;
};
-class FloatEncoder : public TypedIterator<float, Encoder<float>>
+class Float16Encoder : public TypedIterator<Half, Encoder<float>>
+{
+public:
+ Float16Encoder(Half* data)
+ : TypedIterator(data) {}
+
+ Float16Encoder()
+ : Float16Encoder(nullptr) {}
+
+ void Set(float right) override
+ {
+ armnnUtils::FloatingPointConverter::ConvertFloat32To16(&right, 1, m_Iterator);
+ }
+
+ float Get() const override
+ {
+ float val = 0.f;
+ armnnUtils::FloatingPointConverter::ConvertFloat16To32(m_Iterator, 1, &val);
+ return val;
+ }
+};
+
+class Float32Encoder : public TypedIterator<float, Encoder<float>>
{
public:
- FloatEncoder(float* data)
+ Float32Encoder(float* data)
: TypedIterator(data) {}
- FloatEncoder()
- : FloatEncoder(nullptr) {}
+ Float32Encoder()
+ : Float32Encoder(nullptr) {}
void Set(float right) override
{
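The hunk above introduces the Float16 iterator pair: Float16Encoder narrows through ConvertFloat32To16 in Set(), and Float16Decoder widens through ConvertFloat16To32 in Get(). A minimal usage sketch, assuming both classes sit in the armnn namespace like the rest of BaseIterator.hpp:

#include "BaseIterator.hpp"
#include <vector>

void Float16IteratorRoundTrip()
{
    std::vector<armnn::Half> buffer(4);

    armnn::Float16Encoder encoder(buffer.data());
    encoder.Set(0.5f);                  // narrowed to IEEE half in buffer[0]

    armnn::Float16Decoder decoder(buffer.data());
    float value = decoder.Get();        // widened back to float; 0.5f round-trips exactly
    (void)value;
}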
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 0101789bec..328a5eb0f7 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -6,6 +6,7 @@
#pragma once
#include "BaseIterator.hpp"
+#include "FloatingPointConverter.hpp"
#include <boost/assert.hpp>
@@ -20,25 +21,29 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
{
switch(info.GetDataType())
{
- case armnn::DataType::QuantisedAsymm8:
+ case DataType::QuantisedAsymm8:
{
return std::make_unique<QASymm8Decoder>(
static_cast<const uint8_t*>(data),
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
- case armnn::DataType::QuantisedSymm16:
+ case DataType::QuantisedSymm16:
{
return std::make_unique<QSymm16Decoder>(
static_cast<const int16_t*>(data),
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
- case armnn::DataType::Float32:
+ case DataType::Float16:
{
- return std::make_unique<FloatDecoder>(static_cast<const float*>(data));
+ return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
}
- case armnn::DataType::Signed32:
+ case DataType::Float32:
+ {
+ return std::make_unique<Float32Decoder>(static_cast<const float*>(data));
+ }
+ case DataType::Signed32:
{
const float scale = info.GetQuantizationScale();
if (scale == 0.f)
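With the new Float16 case, MakeDecoder hands back a Float16Decoder whenever the TensorInfo reports DataType::Float16. A hedged sketch of the call site; the templated MakeDecoder<float> form is assumed from how the reference workloads normally use it:

#include "Decoders.hpp"
#include <memory>
#include <vector>

void DecodeFloat16Tensor()
{
    armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float16);
    std::vector<armnn::Half> data(info.GetNumElements());

    // For Float16 tensors this now returns a Float16Decoder.
    std::unique_ptr<armnn::Decoder<float>> decoder = armnn::MakeDecoder<float>(info, data.data());
    float first = decoder->Get();   // data[0] widened to 32-bit float
    (void)first;
}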
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index f0e40d224b..2b3a11af06 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -38,9 +38,13 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
{
return std::make_unique<Int32Encoder>(static_cast<int32_t*>(data));
}
+ case armnn::DataType::Float16:
+ {
+ return std::make_unique<Float16Encoder>(static_cast<Half*>(data));
+ }
case armnn::DataType::Float32:
{
- return std::make_unique<FloatEncoder>(static_cast<float*>(data));
+ return std::make_unique<Float32Encoder>(static_cast<float*>(data));
}
default:
{
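MakeEncoder gains the matching branch, returning a Float16Encoder for DataType::Float16 so reference workloads can write results back in half precision. A sketch of the write path, mirroring the decoder example above and again assuming the templated MakeEncoder<float> form:

#include "Encoders.hpp"
#include <memory>
#include <vector>

void EncodeFloat16Tensor()
{
    armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float16);
    std::vector<armnn::Half> data(info.GetNumElements());

    // For Float16 tensors this now returns a Float16Encoder.
    std::unique_ptr<armnn::Encoder<float>> encoder = armnn::MakeEncoder<float>(info, data.data());
    encoder->Set(3.25f);   // narrowed to IEEE half and written into data[0]
}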