Diffstat (limited to 'src/backends/backendsCommon')
 src/backends/backendsCommon/LayerSupportBase.cpp              |   7 +
 src/backends/backendsCommon/LayerSupportBase.hpp              |   4 +
 src/backends/backendsCommon/WorkloadData.cpp                  |  27 +
 src/backends/backendsCommon/WorkloadData.hpp                  |   5 +
 src/backends/backendsCommon/WorkloadFactory.cpp               |  16 +
 src/backends/backendsCommon/WorkloadFactory.hpp               |   3 +
 src/backends/backendsCommon/common.mk                         |   1 +
 src/backends/backendsCommon/test/CMakeLists.txt               |   2 +
 src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp |   2 +
 src/backends/backendsCommon/test/LayerTests.hpp               |   1 +
 src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp  | 229 +
 src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp  |  84 +
 12 files changed, 381 insertions(+), 0 deletions(-)
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 77067d9c6c..2e171f98ae 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -88,6 +88,13 @@ bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo&, // input
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsCastSupported(const TensorInfo&, //input
+ const TensorInfo&, //output
+ Optional<std::string &> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsComparisonSupported(const TensorInfo&, // input0
const TensorInfo&, // input1
const TensorInfo&, // output
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index e04d657716..a6f1b34b61 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -47,6 +47,10 @@ public:
const BatchToSpaceNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsCastSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
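As a quick illustration (not part of the patch), this is how a caller could probe the new entry point before building a Cast layer; `layerSupport` stands in for whichever backend's ILayerSupport implementation is in use, and the shapes are made up:

    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>

    // Probe Cast support for a Float32 -> Float16 conversion. LayerSupportBase
    // answers "unsupported" and fills `reason`; a backend that overrides
    // IsCastSupported returns its own answer instead.
    armnn::TensorInfo input({1, 3, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo output({1, 3, 2, 3}, armnn::DataType::Float16);

    std::string reason;
    bool supported = layerSupport.IsCastSupported(input, output,
                                                  armnn::Optional<std::string&>(reason));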
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 2c5303c019..100d23ee39 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -706,6 +706,33 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
}
}
+void CastQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"CastQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::BFloat16,
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16,
+ DataType::Signed32,
+ DataType::Signed64
+ };
+
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"SoftmaxQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 8a2dd1fe78..abaa4f5185 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -155,6 +155,11 @@ struct ArgMinMaxQueueDescriptor : QueueDescriptorWithParameters<ArgMinMaxDescrip
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct CastQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Fill layer workload data.
struct FillQueueDescriptor : QueueDescriptorWithParameters<FillDescriptor>
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 20d7134c3a..9d7d5bd26d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -138,6 +138,16 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Cast:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Comparison:
{
auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
@@ -1345,6 +1355,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToS
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
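The base CreateCast() above returns an empty pointer; a backend that actually implements the layer overrides it in its own workload factory. A hedged sketch with hypothetical class names (MyBackendWorkloadFactory, MyCastWorkload), not code from this change:

    // Hypothetical override in a concrete backend's workload factory.
    std::unique_ptr<armnn::IWorkload> MyBackendWorkloadFactory::CreateCast(
        const armnn::CastQueueDescriptor& descriptor,
        const armnn::WorkloadInfo& info) const
    {
        // Hand the validated descriptor and tensor infos to the backend's
        // Cast workload implementation.
        return std::make_unique<MyCastWorkload>(descriptor, info);
    }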
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 13fd190ea2..42360d37ff 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -87,6 +87,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ virtual std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
virtual std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 54c791677f..6e4a8c75d2 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -43,6 +43,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/AdditionTestImpl.cpp \
test/layerTests/ArgMinMaxTestImpl.cpp \
test/layerTests/BatchNormalizationTestImpl.cpp \
+ test/layerTests/CastTestImpl.cpp \
test/layerTests/ComparisonTestImpl.cpp \
test/layerTests/ConcatTestImpl.cpp \
test/layerTests/ConstantTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 248ada92a2..98b800bda2 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -68,6 +68,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/BatchNormalizationTestImpl.cpp
layerTests/BatchNormalizationTestImpl.hpp
layerTests/BatchToSpaceNdTestImpl.hpp
+ layerTests/CastTestImpl.cpp
+ layerTests/CastTestImpl.hpp
layerTests/ComparisonTestImpl.cpp
layerTests/ComparisonTestImpl.hpp
layerTests/ConcatTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index c7d1dd2182..b73efbe26c 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -566,6 +566,8 @@ DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
+DECLARE_LAYER_POLICY_1_PARAM(Cast)
+
DECLARE_LAYER_POLICY_2_PARAM(Comparison)
DECLARE_LAYER_POLICY_2_PARAM(Concat)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a7dcb9988f..c1b4b46863 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -11,6 +11,7 @@
#include <backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp>
#include <backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp>
+#include <backendsCommon/test/layerTests/CastTestImpl.hpp>
#include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
#include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
new file mode 100644
index 0000000000..ad23b8c767
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
@@ -0,0 +1,229 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "CastTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
+
+
+template<armnn::DataType inputDataType, armnn::DataType outputDataType, typename TInput, typename TOutput>
+LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const std::vector<TInput>& inputValues,
+ const std::vector<TOutput>& outputValues)
+{
+ IgnoreUnused(memoryManager);
+ armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, inputDataType);
+ armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, outputDataType);
+ float quantizationScale = 1.0f;
+ int32_t quantizationOffset = 0;
+
+ if(armnn::IsQuantizedType<TInput>())
+ {
+ inputTensorInfo.SetQuantizationScale(quantizationScale);
+ inputTensorInfo.SetQuantizationOffset(quantizationOffset);
+ }
+ if(armnn::IsQuantizedType<TOutput>())
+ {
+ outputTensorInfo.SetQuantizationScale(quantizationScale);
+ outputTensorInfo.SetQuantizationOffset(quantizationOffset);
+ }
+
+ auto input = MakeTensor<TInput, 4>(inputTensorInfo, inputValues);
+
+ LayerTestResult<TOutput, 4> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outputValues);
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::CastQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateCast(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ return ret;
+}
+
+LayerTestResult<float, 4> CastInt32ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<int32_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ return CastTest<armnn::DataType::Signed32, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<float, 4> CastInt16ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<int16_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ return CastTest<armnn::DataType::QSymmS16, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<float, 4> CastInt8ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ return CastTest<armnn::DataType::QSymmS8, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<float, 4> CastInt8AsymmToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<float> outputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ return CastTest<armnn::DataType::QAsymmS8, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues, outputValues);
+}
+
+LayerTestResult<float, 4> CastUInt8ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    std::vector<uint8_t> inputValues = { 1, 3, 1, 3, 1, 3, 1, 3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<float> outputValues = { 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ return CastTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<uint8_t, 4> CastInt8ToUInt82dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, -1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<uint8_t> outputValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ return CastTest<armnn::DataType::QSymmS8, armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<uint8_t, 4> CastInt8AsymmToUInt82dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<int8_t> inputValues = { -1, -3, -1, -3, -1, -3, -1, -3, -1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ std::vector<uint8_t> outputValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ return CastTest<armnn::DataType::QAsymmS8, armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<float, 4> CastFloat16ToFloat322dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ using namespace half_float::literal;
+
+ std::vector<armnn::Half> inputValues = { -1.10_h, -3._h, -1.30_h, -3._h, -1._h, -3._h, -1._h, -3._h, 1._h,
+ 3.10_h, 1._h, 3.30_h, 1._h, 2._h, 1._h, 3._h, 1._h, 3._h };
+ std::vector<float> outputValues = { -1.1f, -3.0f, -1.3f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.1f, 1.0f, 3.3f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ return CastTest<armnn::DataType::Float16, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<float, 4> CastBFloat16ToFloat322dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+
+ std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
+ {
+ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ },
+ 1.0f, 0);
+
+
+ std::vector<float> outputValues = { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f };
+
+ return CastTest<armnn::DataType::BFloat16, armnn::DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues, outputValues);
+}
+
+LayerTestResult<armnn::Half, 4> CastFloat32ToFloat162dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ using namespace half_float::literal;
+
+ std::vector<float> inputValues = { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f,
+ 0.00000004f, 3.4E38f, 300.0f, 0.5f, 1.3f, 1.5f, 2.1E4f, 8.76f, 15.2f, 37.5f };
+ std::vector<armnn::Half> outputValues = {-37.50_h, -15.20_h, -8.76_h, -2._h, -1.50_h, -1.30_h, -0.50_h, -0.40_h,
+ 0._h, 6.55E4_h, 300._h, 0.50_h, 1.30_h, 1.50_h, 2.1E4_h, 8.76_h, 15.20_h, 37.50_h};
+
+ return CastTest<armnn::DataType::Float32, armnn::DataType::Float16>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<int8_t , 4> CastFloat32ToInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<float> inputValues = { -1.0f, -3.5f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.1f, 1.5f, 3.9f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ std::vector<int8_t> outputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ return CastTest<armnn::DataType::Float32, armnn::DataType::QAsymmS8>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
+
+LayerTestResult<uint8_t , 4> CastFloat32ToUInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ std::vector<float> inputValues = { -1.0f, -3.5f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.1f, 1.5f, 3.9f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+ std::vector<uint8_t> outputValues = { 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+ return CastTest<armnn::DataType::Float32, armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
+ tensorHandleFactory, inputValues,
+ outputValues);
+}
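As a worked example of how these cases generalise (not part of the patch), any other direction accepted by CastQueueDescriptor::Validate can reuse the same CastTest template. A hypothetical Float32 -> Signed32 case, written in the same style and meant to live alongside the functions above in CastTestImpl.cpp:

    LayerTestResult<int32_t, 4> CastFloat32ToInt322dTest(armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
    {
        // ResolveType<Signed32> is int32_t, so TOutput resolves to int32_t here.
        std::vector<float> inputValues = { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
                                           3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
        std::vector<int32_t> outputValues = { -1, -3, -1, -3, -1, -3, -1, -3, 1,
                                              3, 1, 3, 1, 2, 1, 3, 1, 3 };
        return CastTest<armnn::DataType::Float32, armnn::DataType::Signed32>(workloadFactory, memoryManager,
                                                                             tensorHandleFactory, inputValues,
                                                                             outputValues);
    }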
diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp
new file mode 100644
index 0000000000..bf8d5a4e24
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.hpp
@@ -0,0 +1,84 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <Half.hpp>
+
+template<armnn::DataType inputDataType, armnn::DataType outputDataType,
+ typename TInput=armnn::ResolveType<inputDataType>,
+ typename TOutput=armnn::ResolveType<outputDataType>>
+LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const std::vector<TInput>& inputTensor,
+ const std::vector<TOutput>& outputTensor);
+
+
+LayerTestResult<float, 4> CastInt32ToFloat2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> CastInt16ToFloat2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> CastInt8ToFloat2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> CastInt8AsymmToFloat2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> CastUInt8ToFloat2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> CastInt8ToUInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> CastInt8AsymmToUInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> CastFloat16ToFloat322dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> CastBFloat16ToFloat322dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> CastFloat32ToFloat162dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t , 4> CastFloat32ToInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t , 4> CastFloat32ToUInt82dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
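Finally, to run these against a backend, each backend's layer-test file registers the functions declared above. A minimal sketch, assuming the ARMNN_AUTO_TEST_CASE_WITH_THF macro that the existing backend test suites use for tensor-handle-factory based tests (the selection of cases is arbitrary):

    // e.g. in a backend's <Backend>LayerTests.cpp
    ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt32ToFloat2d, CastInt32ToFloat2dTest)
    ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat16ToFloat322d, CastFloat16ToFloat322dTest)
    ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat32ToUInt82d, CastFloat32ToUInt82dTest)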