author     Matthew Jackson <matthew.jackson@arm.com>      2019-09-12 09:08:23 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2019-09-12 15:28:01 +0000
commit     9bff14458f9950a5d31b9523c62c0bbf79a65fcf (patch)
tree       8252812da63458e38b704a3abba3e1d5a35e1bf2 /src/backends/reference
parent     1e0466c4ab26e82abed7f8f263dfe6a2a543cc1a (diff)
download   armnn-9bff14458f9950a5d31b9523c62c0bbf79a65fcf.tar.gz
IVGCVSW-3857 Add Reference FP16 workload support to remaining layers
* Adds Reference FP16 support and unit tests for layers not already supported

!referencetests:202156

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: I6fc9b9ce2809e163f72e27e877025c8fb85d9fbe
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp               |  63
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp            |  60
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp   |  84
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp            | 129
-rw-r--r--  src/backends/reference/test/RefOptimizedNetworkTests.cpp |  61
5 files changed, 232 insertions, 165 deletions
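The RefLayerSupport.cpp hunks below all make the same mechanical change: each layer's supportedTypes array gains a DataType::Float16 entry and its size bumps accordingly (3 to 4, or 2 to 3 for Floor, whose reference path has no QuantisedAsymm8 support). For orientation, a representative post-patch checker looks roughly like this; it is a sketch, and CheckSupportRule/TypeAnyOf are the rule helpers this file uses around the arrays, which the hunks themselves do not show:

    // Sketch of the pattern shared by every checker touched below.
    bool RefLayerSupport::IsAbsSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
    {
        bool supported = true;
        std::array<DataType, 4> supportedTypes =
        {
            DataType::Float32,
            DataType::Float16,        // newly accepted by this patch
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
        };
        // Assumed helpers: reject tensors whose data type is outside the list.
        supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                      "Reference abs: input type not supported");
        supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                                      "Reference abs: output type not supported");
        return supported;
    }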
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 4958968175..465d45cbae 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -70,9 +70,10 @@ bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo&
Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -202,9 +203,10 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
{
ignore_unused(descriptor);
- std::array<DataType, 3> supportedTypes =
+ std::array<DataType, 4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -249,9 +251,10 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
std::string outputTensorStr = "output";
// Define supported types.
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -290,9 +293,10 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -556,8 +560,9 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -590,9 +595,10 @@ bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -637,9 +643,10 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
ignore_unused(output);
bool supported = true;
- std::array<DataType,2> supportedTypes =
+ std::array<DataType,3> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedSymm16
};
@@ -716,9 +723,10 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
armnn::Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -745,9 +753,10 @@ bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -923,8 +932,9 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1053,8 +1063,9 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1288,9 +1299,10 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1313,9 +1325,10 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1337,9 +1350,10 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1366,9 +1380,10 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
{
ignore_unused(output);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1392,9 +1407,10 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
{
ignore_unused(output);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1420,9 +1436,10 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1445,9 +1462,10 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
{
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1465,9 +1483,10 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
{
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1551,8 +1570,9 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1585,9 +1605,10 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType, 3> supportedTypes
+ std::array<DataType, 4> supportedTypes
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
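Taken together, the RefLayerSupport.cpp hunks above mean a caller querying the reference backend now gets a positive answer for FP16 tensors on the covered layers. A minimal query sketch (include path assumed; the IsAbsSupported signature matches the first hunk above):

    #include "RefLayerSupport.hpp"   // assumed path within src/backends/reference

    bool RefSupportsFp16Abs()
    {
        armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float16);
        armnn::RefLayerSupport layerSupport;
        std::string reason;
        // Expected to return true for Float16 after this patch.
        return layerSupport.IsAbsSupported(info, info,
                                           armnn::Optional<std::string&>(reason));
    }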
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index a3d4bf08c2..52dffcc1f8 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -137,20 +137,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const Activation
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefSplitterWorkload>(descriptor, info);
}
@@ -248,10 +240,6 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMemImport(const MemI
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefResizeWorkload>(descriptor, info);
}
@@ -283,10 +271,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2Nor
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefConcatWorkload>(descriptor, info);
}
@@ -305,20 +289,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueD
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefSpaceToBatchNdWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToDepth(const armnn::SpaceToDepthQueueDescriptor& descriptor,
const armnn::WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefSpaceToDepthWorkload>(descriptor, info);
}
@@ -351,30 +327,18 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateDivision(
const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefDivisionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction(
const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefSubtractionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMaximum(
const MaximumQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefMaximumWorkload>(descriptor, info);
}
@@ -387,10 +351,6 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean(
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMinimum(
const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefMinimumWorkload>(descriptor, info);
}
@@ -417,10 +377,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescr
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
}
@@ -449,20 +405,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescr
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefRsqrtWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
const armnn::WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefGatherWorkload>(descriptor, info);
}
@@ -487,10 +435,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const Dequantize
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefPreluWorkload>(descriptor, info);
}
@@ -510,10 +454,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescr
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefAbsWorkload>(descriptor, info);
}
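Every removed block in RefWorkloadFactory.cpp had the same shape: an IsFloat16 early-out that returned a NullWorkload (i.e. "unsupported") before the concrete reference workload could be built. With the workloads now handling FP16, the guard is dropped and the factory constructs the real workload unconditionally. A hedged sketch of what such a guard tests follows; the real helper lives elsewhere in the factory, and this simplification is purely illustrative:

    #include <algorithm>

    // Illustrative stand-in for the IsFloat16(info) predicate used by the
    // removed guards: true when any input or output tensor is Float16.
    bool IsFloat16Sketch(const armnn::WorkloadInfo& info)
    {
        auto isFp16 = [](const armnn::TensorInfo& t)
        {
            return t.GetDataType() == armnn::DataType::Float16;
        };
        return std::any_of(info.m_InputTensorInfos.begin(),
                           info.m_InputTensorInfos.end(), isFp16)
            || std::any_of(info.m_OutputTensorInfos.begin(),
                           info.m_OutputTensorInfos.end(), isFp16);
    }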
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 04c9acbe39..580d8550f0 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -112,7 +112,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
armnn::DataType::QuantisedSymm16>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
SubtractionQueueDescriptor,
@@ -120,6 +120,14 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+{
+ RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
+ SubtractionQueueDescriptor,
+ SubtractionLayer,
+ armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
@@ -160,7 +168,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
armnn::DataType::QuantisedSymm16>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
+BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
DivisionQueueDescriptor,
@@ -168,6 +176,14 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
+{
+ RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
+ DivisionQueueDescriptor,
+ DivisionLayer,
+ armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
@@ -225,6 +241,18 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
(DataLayout::NHWC);
}
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
+{
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
+ (DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
+{
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
+ (DataLayout::NHWC);
+}
+
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
@@ -486,6 +514,11 @@ BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+{
+ RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
@@ -523,6 +556,11 @@ BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+{
+ RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedAsymm8>();
@@ -566,6 +604,11 @@ BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
+{
+ RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QuantisedAsymm8>();
@@ -654,6 +697,11 @@ BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
+{
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
@@ -689,6 +737,11 @@ BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32)
RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16)
+{
+ RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateRsqrtUint8)
{
RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedAsymm8>();
@@ -717,6 +770,11 @@ BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
+{
+ RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
{
RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedAsymm8>();
@@ -833,6 +891,11 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
+{
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
+}
+
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
@@ -931,6 +994,11 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
}
+BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+{
+ RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
+}
+
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedAsymm8);
@@ -948,6 +1016,13 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
armnn::InvalidArgumentException);
}
+BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
+{
+ BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+ armnn::DataType::Float16),
+ armnn::InvalidArgumentException);
+}
+
BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
{
BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
@@ -980,6 +1055,11 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
}
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
+{
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
{
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
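The new Float16 cases in RefCreateWorkloadTests.cpp reuse the existing templated helpers unchanged; only the DataType template argument differs from their Float32 twins. Schematically, each helper builds a single-layer graph, asks RefWorkloadFactory for a workload, and checks the result. The names below are illustrative, not the actual helper bodies:

    template <typename WorkloadType, armnn::DataType DataType>
    static void RefCreateWorkloadSketch()
    {
        armnn::Graph graph;
        armnn::RefWorkloadFactory factory;
        // Hypothetical helper: wires Input -> Layer -> Output with tensors of
        // DataType and returns whatever workload the factory produced.
        auto workload = CreateWorkloadForLayer<WorkloadType, DataType>(factory, graph);
        // Before this patch an FP16 graph would have produced a NullWorkload
        // here; now a concrete Ref workload is expected.
        BOOST_CHECK(workload != nullptr);
    }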
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index da036a6758..439ac49121 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -362,6 +362,10 @@ ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint8, Simple3dSoftmaxUint8Test, 1.0f)
ARMNN_AUTO_TEST_CASE(Simple4dSoftmax, Simple4dSoftmaxTest, 1.0f)
ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint8, Simple4dSoftmaxUint8Test, 1.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxFloat16, SimpleSoftmaxFloat16Test, 1.0f)
+ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxFloat16, Simple3dSoftmaxFloat16Test, 1.0f)
+ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxFloat16, Simple4dSoftmaxFloat16Test, 1.0f)
+
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxUint16, SimpleSoftmaxUint16Test, 1.0f)
ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint16, Simple3dSoftmaxUint16Test, 1.0f)
ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint16, Simple4dSoftmaxUint16Test, 1.0f)
@@ -452,16 +456,19 @@ ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
// Splitter
-ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat, SplitterFloatTest)
+ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat32, SplitterFloat32Test)
+ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat16, SplitterFloat16Test)
ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test)
ARMNN_AUTO_TEST_CASE(SimpleSplitterInt16, SplitterInt16Test)
-ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat, CopyViaSplitterFloatTest)
+ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test)
+ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat16, CopyViaSplitterFloat16Test)
ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
ARMNN_AUTO_TEST_CASE(CopyViaSplitterInt16, CopyViaSplitterInt16Test)
// Concat
ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
+ARMNN_AUTO_TEST_CASE(ConcatFloat16, ConcatFloat16Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
ARMNN_AUTO_TEST_CASE(ConcatUint16, ConcatUint16Test)
@@ -489,6 +496,10 @@ ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest)
ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(SubBroadcast, SubtractionBroadcastTest)
+ARMNN_AUTO_TEST_CASE(SimpleSubFloat16, SubtractionTest)
+ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementFloat16, SubtractionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(SubBroadcastFloat16, SubtractionBroadcastTest)
+
ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test)
ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
@@ -502,6 +513,11 @@ ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+
+ARMNN_AUTO_TEST_CASE(DivisionFloat16, DivisionFloat16Test)
+ARMNN_AUTO_TEST_CASE(DivisionFloat16Broadcast1Element, DivisionBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE(DivisionFloat16Broadcast1DVector, DivisionBroadcast1DVectorFloat16Test)
+
// NOTE: division by zero for quantized div needs more attention
// see IVGCVSW-1849
ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test)
@@ -516,6 +532,9 @@ ARMNN_AUTO_TEST_CASE(DivisionInt16Broadcast1DVector, DivisionBroadcast1DVectorIn
ARMNN_AUTO_TEST_CASE(SimpleEqual, EqualSimpleTest)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVector, EqualBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(EqualFloat16, EqualFloat16Test)
+ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorFloat16, EqualBroadcast1DVectorFloat16Test)
ARMNN_AUTO_TEST_CASE(EqualUint8, EqualUint8Test)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorUint8, EqualBroadcast1DVectorUint8Test)
@@ -527,11 +546,17 @@ ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVector, GreaterBroadcast1DVectorTest)
ARMNN_AUTO_TEST_CASE(GreaterUint8, GreaterUint8Test)
ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorUint8, GreaterBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_CASE(GreaterFloat16, GreaterFloat16Test)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorFloat16, GreaterBroadcast1DVectorFloat16Test)
// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(MaximumFloat16, MaximumFloat16Test)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementFloat16, MaximumBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorFloat16, MaximumBroadcast1DVectorFloat16Test)
ARMNN_AUTO_TEST_CASE(MaximumUint8, MaximumUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
@@ -543,6 +568,9 @@ ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorInt16, MaximumBroadcast1DVectorInt1
ARMNN_AUTO_TEST_CASE(SimpleMinimum1, MinimumBroadcast1ElementTest1)
ARMNN_AUTO_TEST_CASE(SimpleMinimum2, MinimumBroadcast1ElementTest2)
ARMNN_AUTO_TEST_CASE(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_CASE(MinimumFloat16, MinimumFloat16Test)
+ARMNN_AUTO_TEST_CASE(MinimumBroadcast1ElementFloat16, MinimumBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE(MinimumBroadcast1DVectorFloat16, MinimumBroadcast1DVectorFloat16Test)
ARMNN_AUTO_TEST_CASE(MinimumInt16, MinimumInt16Test)
ARMNN_AUTO_TEST_CASE(MinimumBroadcast1ElementInt16, MinimumBroadcast1ElementInt16Test)
ARMNN_AUTO_TEST_CASE(MinimumBroadcast1DVectorInt16, MinimumBroadcast1DVectorInt16Test)
@@ -560,8 +588,10 @@ ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorInt16, MultiplicationBroadca
ARMNN_AUTO_TEST_CASE(Multiplication5d, Multiplication5dTest)
// Batch Norm
-ARMNN_AUTO_TEST_CASE(BatchNormFloat, BatchNormFloatTest)
-ARMNN_AUTO_TEST_CASE(BatchNormFloatNhwc, BatchNormFloatNhwcTest)
+ARMNN_AUTO_TEST_CASE(BatchNormFloat32, BatchNormFloat32Test)
+ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
+ARMNN_AUTO_TEST_CASE(BatchNormFloat16, BatchNormFloat16Test)
+ARMNN_AUTO_TEST_CASE(BatchNormFloat16Nhwc, BatchNormFloat16NhwcTest)
ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest)
ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test)
@@ -571,6 +601,9 @@ ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16,
+ SimpleResizeBilinearTest<DataType::Float16>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
DataLayout::NCHW)
@@ -580,6 +613,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16,
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16,
+ ResizeBilinearNopTest<DataType::Float16>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
DataLayout::NCHW)
@@ -589,6 +625,9 @@ ARMNN_AUTO_TEST_CASE(esizeBilinearNopUint16,
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16,
+ ResizeBilinearSqMinTest<DataType::Float16>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
DataLayout::NCHW)
@@ -598,6 +637,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16,
+ ResizeBilinearMinTest<DataType::Float16>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
DataLayout::NCHW)
@@ -607,6 +649,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
ResizeBilinearMagTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16,
+ ResizeBilinearMagTest<DataType::Float16>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
DataLayout::NCHW)
@@ -618,6 +663,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16,
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16,
+ ResizeBilinearNopTest<DataType::Float16>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
DataLayout::NHWC)
@@ -627,6 +675,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint16Nhwc,
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16,
+ SimpleResizeBilinearTest<DataType::Float16>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
DataLayout::NHWC)
@@ -636,6 +687,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16Nhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16,
+ ResizeBilinearSqMinTest<DataType::Float16>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
DataLayout::NHWC)
@@ -645,6 +699,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16Nhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16,
+ ResizeBilinearMinTest<DataType::Float16>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
DataLayout::NHWC)
@@ -654,6 +711,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16Nhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
ResizeBilinearMagTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16,
+ ResizeBilinearMagTest<DataType::Float16>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
DataLayout::NHWC)
@@ -863,6 +923,7 @@ ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test,
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleFloorFloat16, SimpleFloorTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QuantisedSymm16>)
// Reshape
@@ -876,6 +937,8 @@ ARMNN_AUTO_TEST_CASE(Rsqrt2d, Rsqrt2dTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Rsqrt3d, Rsqrt3dTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QuantisedSymm16>)
@@ -964,6 +1027,11 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsFloat32, SpaceToBatchNdMultiChan
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockFloat32, SpaceToBatchNdMultiBlockFloat32Test)
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingFloat32, SpaceToBatchNdPaddingFloat32Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleFloat16, SpaceToBatchNdSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsFloat16, SpaceToBatchNdMultiChannelsFloat16Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockFloat16, SpaceToBatchNdMultiBlockFloat16Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingFloat16, SpaceToBatchNdPaddingFloat16Test)
+
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleUint8, SpaceToBatchNdSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsUint8, SpaceToBatchNdMultiChannelsUint8Test)
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockUint8, SpaceToBatchNdMultiBlockUint8Test)
@@ -974,6 +1042,11 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNhwcFloat32, SpaceToBatchNdMulti
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcFloat32, SpaceToBatchNdMultiBlockNhwcFloat32Test)
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNhwcFloat32, SpaceToBatchNdPaddingNhwcFloat32Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleNhwcFloat16, SpaceToBatchNdSimpleNhwcFloat16Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNhwcFloat16, SpaceToBatchNdMultiChannelsNhwcFloat16Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcFloat16, SpaceToBatchNdMultiBlockNhwcFloat16Test)
+ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNhwcFloat16, SpaceToBatchNdPaddingNhwcFloat16Test)
+
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleNhwcUint8, SpaceToBatchNdSimpleNhwcUint8Test)
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNhwcUint8, SpaceToBatchNdMultiChannelsNhwcUint8Test)
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcUint8, SpaceToBatchNdMultiBlockNhwcUint8Test)
@@ -990,13 +1063,21 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcUint16, SpaceToBatchNdMultiBloc
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNhwcUint16, SpaceToBatchNdPaddingNhwcUint16Test)
// BatchToSpace
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat1, BatchToSpaceNdNhwcTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat2, BatchToSpaceNdNhwcTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat3, BatchToSpaceNdNhwcTest3<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat4, BatchToSpaceNdNhwcTest4<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat5, BatchToSpaceNdNhwcTest5<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat6, BatchToSpaceNdNhwcTest6<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat7, BatchToSpaceNdNhwcTest7<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_1, BatchToSpaceNdNhwcTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_2, BatchToSpaceNdNhwcTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_3, BatchToSpaceNdNhwcTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_4, BatchToSpaceNdNhwcTest4<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_5, BatchToSpaceNdNhwcTest5<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_6, BatchToSpaceNdNhwcTest6<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_7, BatchToSpaceNdNhwcTest7<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_1, BatchToSpaceNdNhwcTest1<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_2, BatchToSpaceNdNhwcTest2<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_3, BatchToSpaceNdNhwcTest3<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_4, BatchToSpaceNdNhwcTest4<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QuantisedAsymm8>)
@@ -1014,13 +1095,13 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5, BatchToSpaceNdNhwcTest5<DataT
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_6, BatchToSpaceNdNhwcTest6<DataType::QuantisedSymm16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7, BatchToSpaceNdNhwcTest7<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat1, BatchToSpaceNdNchwTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat2, BatchToSpaceNdNchwTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat3, BatchToSpaceNdNchwTest3<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat4, BatchToSpaceNdNchwTest4<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat5, BatchToSpaceNdNchwTest5<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat6, BatchToSpaceNdNchwTest6<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat7, BatchToSpaceNdNchwTest7<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_1, BatchToSpaceNdNchwTest1<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_2, BatchToSpaceNdNchwTest2<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_3, BatchToSpaceNdNchwTest3<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_4, BatchToSpaceNdNchwTest4<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QuantisedAsymm8>)
@@ -1100,13 +1181,16 @@ ARMNN_AUTO_TEST_CASE(Debug2dQSymm16, Debug2dInt16Test)
ARMNN_AUTO_TEST_CASE(Debug1dQSymm16, Debug1dInt16Test)
// Gather
-ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat, Gather1dParamsFloatTest)
+ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat16, Gather1dParamsFloat16Test)
ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
ARMNN_AUTO_TEST_CASE(Gather1dParamsInt16, Gather1dParamsInt16Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat, GatherMultiDimParamsFloatTest)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat16, GatherMultiDimParamsFloat16Test)
ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsInt16, GatherMultiDimParamsInt16Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat, GatherMultiDimParamsMultiDimIndicesFloatTest)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat32, GatherMultiDimParamsMultiDimIndicesFloat32Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat16, GatherMultiDimParamsMultiDimIndicesFloat16Test)
ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesUint8, GatherMultiDimParamsMultiDimIndicesUint8Test)
ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimParamsMultiDimIndicesInt16Test)
@@ -1114,6 +1198,8 @@ ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimPar
ARMNN_AUTO_TEST_CASE(Abs2d, Abs2dTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Abs2dFloat16, Abs2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Abs3dFloat16, Abs3dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QuantisedSymm16>)
@@ -1161,6 +1247,7 @@ ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest<DataType::QuantisedSymm16>)
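Finally, the hunk below deletes FP16TurboModeTestOnCpuRef from RefOptimizedNetworkTests.cpp. That test asserted that, with the FP32-to-FP16 "turbo" option enabled, a Floor network rolled back to FP32 because FP16 Floor was unsupported on CpuRef; this patch adds Float16 to IsFloorSupported, so the rollback no longer happens and the expectation is stale. The option itself is set as in the removed test body:

    // Lifted from the removed test: request FP32 -> FP16 reduction when
    // optimizing for CpuRef.
    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ReduceFp32ToFp16 = true;
    armnn::IOptimizedNetworkPtr optimizedNet =
        armnn::Optimize(net, backends, runtime->GetDeviceSpec(), optimizerOptions);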
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 1a29e73af8..cd3708cc4f 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -149,67 +149,6 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
}
}
-BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef)
-{
- // Test to check when FP16 Turbo mode set
- // it converts the FP32 network to FP16 Network
- // add FP32ToFP16 conversion layer after the InputLayer
- // add FP16ToFP32 conversion layer after the OutputLayer
- // checks the other layers if they are supported in FP16
- // if they are not put the conversion layers before and after
- // if they are not supported in FP16 use FP32 instead
- // if there are inverse conversion layers remove them with optimization
- // at the moment FloorLayer is not supported in FP16 so it rolls back to FP32
- // and inverse conversion layers are removed by the optimizer
- armnn::Network net;
-
- // Defines layers.
- auto input = net.AddInputLayer(0);
- auto floor = net.AddFloorLayer();
- auto output = net.AddOutputLayer(0);
-
- // Connects layers.
- input->GetOutputSlot(0).Connect(floor->GetInputSlot(0));
- floor->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- armnn::TensorShape shape({4});
- armnn::TensorInfo info(shape, armnn::DataType::Float32);
- input->GetOutputSlot(0).SetTensorInfo(info);
- floor->GetOutputSlot(0).SetTensorInfo(info);
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_ReduceFp32ToFp16 = true;
-
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
- optimizerOptions);
-
- std::ostringstream ss;
- optimizedNet->SerializeToDot(ss);
-
- auto inputId = input->GetGuid();
- auto floorId = floor->GetGuid();
- auto outputId = output->GetGuid();
-
- std::stringstream expected;
- expected <<
- "digraph Optimized {\n"
- " node [shape=\"record\"];\n"
- " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
- " " << inputId << " [label=\"{Input|LayerType : Input\\lBackendID : CpuRef\\l}\"];\n"
- " " << floorId << " [label=\"{Floor|LayerType : Floor\\lBackendID : CpuRef\\l}\"];\n"
- " " << outputId << " [label=\"{Output|LayerType : Output\\lBackendID : CpuRef\\l}\"];\n"
- " " << inputId << " -> " << floorId << " [label=< [4] >];\n"
- " " << floorId << " -> " << outputId << " [label=< [4] >];\n"
- "}\n";
-
- BOOST_TEST(ss.str() == expected.str());
-}
-
BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef)
{
armnn::Network net;