author    Derek Lamberti <derek.lamberti@arm.com>  2020-01-10 17:14:08 +0000
committer Kevin May <kevin.may@arm.com>            2020-01-13 18:18:12 +0000
commit    f90c56d72de4848a2dc5844a97458aaf09df07c2 (patch)
tree      71f1c6f16a4687286614f5526ed70938a611b27d
parent    842e0dbd40114e19bf26916fefe06c869dbe416d (diff)
Rename quantized data types to remove ambiguity for signed/unsigned payloads

!android-nn-driver:2572
Change-Id: I8fe52ceb09987b3d05c539409510f535165455cc
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
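
For API consumers the rename is mechanical: each deleted spelling has a one-for-one replacement. A minimal before/after sketch (hypothetical caller; assumes only the public headers armnn/Types.hpp and armnn/Tensor.hpp at this revision):

// Hypothetical helper: builds a descriptor for an 8-bit asymmetric
// quantized tensor using the new enumerator name.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

armnn::TensorInfo MakeU8Info(const armnn::TensorShape& shape, float scale, int32_t offset)
{
    // Before this patch: armnn::DataType::QuantisedAsymm8 (still compiles, now warns).
    return armnn::TensorInfo(shape, armnn::DataType::QAsymmU8, scale, offset);
}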
-rw-r--r-- include/armnn/Types.hpp 10
-rw-r--r-- include/armnn/TypesUtils.hpp 14
-rw-r--r-- include/armnnQuantizer/INetworkQuantizer.hpp 2
-rw-r--r-- src/armnn/CompatibleTypes.hpp 4
-rw-r--r-- src/armnn/LayerSupportCommon.hpp 2
-rw-r--r-- src/armnn/Network.cpp 2
-rw-r--r-- src/armnn/NetworkQuantizationScheme.hpp 4
-rw-r--r-- src/armnn/NetworkQuantizer.cpp 4
-rw-r--r-- src/armnn/NetworkQuantizerUtils.cpp 2
-rw-r--r-- src/armnn/ResolveType.hpp 4
-rw-r--r-- src/armnn/Tensor.cpp 2
-rw-r--r-- src/armnn/test/ConstTensorLayerVisitor.cpp 32
-rw-r--r-- src/armnn/test/CreateWorkload.hpp 24
-rw-r--r-- src/armnn/test/NetworkTests.cpp 4
-rw-r--r-- src/armnn/test/OptimizerTests.cpp 14
-rw-r--r-- src/armnn/test/QuantizerTest.cpp 94
-rw-r--r-- src/armnn/test/RuntimeTests.cpp 4
-rw-r--r-- src/armnn/test/TensorTest.cpp 4
-rw-r--r-- src/armnn/test/UtilsTests.cpp 2
-rw-r--r-- src/armnnDeserializer/Deserializer.cpp 6
-rw-r--r-- src/armnnDeserializer/test/DeserializeActivation.cpp 4
-rw-r--r-- src/armnnDeserializer/test/DeserializeAdd.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializeComparison.cpp 10
-rw-r--r-- src/armnnDeserializer/test/DeserializeConstant.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializeDivision.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializeFullyConnected.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializeMultiplication.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializePad.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializePermute.cpp 4
-rw-r--r-- src/armnnDeserializer/test/DeserializePooling2d.cpp 4
-rw-r--r-- src/armnnDeserializer/test/DeserializeReshape.cpp 2
-rw-r--r-- src/armnnDeserializer/test/DeserializeSubtraction.cpp 2
-rw-r--r-- src/armnnQuantizer/ArmNNQuantizerMain.cpp 4
-rw-r--r-- src/armnnSerializer/ArmnnSchema.fbs 6
-rw-r--r-- src/armnnSerializer/Serializer.cpp 4
-rw-r--r-- src/armnnSerializer/SerializerUtils.cpp 12
-rw-r--r-- src/armnnSerializer/test/SerializerTests.cpp 30
-rw-r--r-- src/armnnTfLiteParser/TfLiteParser.cpp 6
-rw-r--r-- src/armnnTfLiteParser/test/Addition.cpp 2
-rw-r--r-- src/armnnTfLiteParser/test/AvgPool2D.cpp 6
-rw-r--r-- src/armnnTfLiteParser/test/Concatenation.cpp 16
-rw-r--r-- src/armnnTfLiteParser/test/Constant.cpp 2
-rw-r--r-- src/armnnTfLiteParser/test/Conv2D.cpp 8
-rw-r--r-- src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp 6
-rw-r--r-- src/armnnTfLiteParser/test/Dequantize.cpp 4
-rw-r--r-- src/armnnTfLiteParser/test/DetectionPostProcess.cpp 6
-rw-r--r-- src/armnnTfLiteParser/test/FullyConnected.cpp 6
-rw-r--r-- src/armnnTfLiteParser/test/MaxPool2D.cpp 6
-rw-r--r-- src/armnnTfLiteParser/test/Reshape.cpp 8
-rw-r--r-- src/armnnTfLiteParser/test/Softmax.cpp 2
-rw-r--r-- src/armnnTfLiteParser/test/Split.cpp 8
-rw-r--r-- src/armnnTfLiteParser/test/Squeeze.cpp 4
-rw-r--r-- src/armnnTfLiteParser/test/Sub.cpp 2
-rw-r--r-- src/armnnTfLiteParser/test/TransposeConv.cpp 2
-rw-r--r-- src/armnnTfLiteParser/test/Unpack.cpp 4
-rw-r--r-- src/armnnTfLiteParser/test/Unsupported.cpp 2
-rw-r--r-- src/backends/aclCommon/ArmComputeTensorUtils.cpp 4
-rw-r--r-- src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp 2
-rw-r--r-- src/backends/backendsCommon/LayerSupportRules.hpp 4
-rw-r--r-- src/backends/backendsCommon/MakeWorkloadHelper.hpp 4
-rw-r--r-- src/backends/backendsCommon/Workload.hpp 6
-rw-r--r-- src/backends/backendsCommon/WorkloadData.cpp 168
-rw-r--r-- src/backends/backendsCommon/WorkloadFactory.cpp 4
-rw-r--r-- src/backends/backendsCommon/WorkloadUtils.cpp 2
-rw-r--r-- src/backends/backendsCommon/test/EndToEndTestImpl.hpp 2
-rw-r--r-- src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp 16
-rw-r--r-- src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp 4
-rw-r--r-- src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp 44
-rw-r--r-- src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp 24
-rw-r--r-- src/backends/backendsCommon/test/WorkloadDataValidation.cpp 2
-rw-r--r-- src/backends/backendsCommon/test/WorkloadTestUtils.hpp 4
-rw-r--r-- src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp 20
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp 46
-rw-r--r-- src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp 12
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp 20
-rw-r--r-- src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp 36
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp 64
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp 72
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp 32
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp 10
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp 12
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp 4
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp 12
-rw-r--r-- src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp 28
-rw-r--r-- src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp 12
-rw-r--r-- src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp 12
-rw-r--r-- src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp 20
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp 80
-rw-r--r-- src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp 6
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp 80
-rw-r--r-- src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp 14
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp 32
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp 8
-rw-r--r-- src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp 54
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp 12
-rw-r--r-- src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp 34
-rw-r--r-- src/backends/cl/ClLayerSupport.cpp 2
-rw-r--r-- src/backends/cl/test/ClCreateWorkloadTests.cpp 26
-rw-r--r-- src/backends/cl/test/ClEndToEndTests.cpp 48
-rw-r--r-- src/backends/cl/test/ClLayerSupportTests.cpp 2
-rw-r--r-- src/backends/cl/test/ClLayerTests.cpp 160
-rw-r--r-- src/backends/cl/workloads/ClGreaterWorkload.cpp 2
-rw-r--r-- src/backends/cl/workloads/ClGreaterWorkload.hpp 2
-rw-r--r-- src/backends/cl/workloads/ClWorkloadUtils.hpp 2
-rw-r--r-- src/backends/neon/test/NeonCreateWorkloadTests.cpp 28
-rw-r--r-- src/backends/neon/test/NeonEndToEndTests.cpp 72
-rw-r--r-- src/backends/neon/test/NeonLayerSupportTests.cpp 2
-rw-r--r-- src/backends/neon/test/NeonLayerTests.cpp 164
-rw-r--r-- src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp 2
-rw-r--r-- src/backends/neon/workloads/NeonGreaterWorkload.cpp 2
-rw-r--r-- src/backends/neon/workloads/NeonGreaterWorkload.hpp 2
-rw-r--r-- src/backends/neon/workloads/NeonWorkloadUtils.hpp 2
-rw-r--r-- src/backends/reference/RefLayerSupport.cpp 192
-rw-r--r-- src/backends/reference/RefWorkloadFactory.cpp 2
-rw-r--r-- src/backends/reference/test/RefCreateWorkloadTests.cpp 114
-rw-r--r-- src/backends/reference/test/RefEndToEndTests.cpp 136
-rw-r--r-- src/backends/reference/test/RefLayerSupportTests.cpp 4
-rw-r--r-- src/backends/reference/test/RefLayerTests.cpp 384
-rw-r--r-- src/backends/reference/workloads/Decoders.hpp 4
-rw-r--r-- src/backends/reference/workloads/Encoders.hpp 4
-rw-r--r-- src/backends/reference/workloads/RefDebugWorkload.cpp 4
-rw-r--r-- src/backends/reference/workloads/RefDebugWorkload.hpp 4
-rw-r--r-- src/backends/reference/workloads/RefPadWorkload.cpp 4
-rw-r--r-- src/backends/reference/workloads/RefPadWorkload.hpp 4
-rw-r--r-- src/backends/reference/workloads/RefPermuteWorkload.cpp 4
-rw-r--r-- src/backends/reference/workloads/RefPermuteWorkload.hpp 4
-rw-r--r-- src/backends/reference/workloads/RefQuantizeWorkload.cpp 4
-rw-r--r-- tests/DeepSpeechV1Database.hpp 2
-rw-r--r-- tests/ImageTensorGenerator/ImageTensorGenerator.cpp 4
-rw-r--r-- tests/ImageTensorGenerator/ImageTensorGenerator.hpp 2
-rw-r--r-- tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp 2
-rw-r--r-- tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp 12
143 files changed, 1502 insertions(+), 1484 deletions(-)
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index dcc8c9e52c..e5a7fc0987 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -10,6 +10,7 @@
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
+#include "Deprecated.hpp"
namespace armnn
{
@@ -32,12 +33,15 @@ enum class DataType
{
Float16 = 0,
Float32 = 1,
- QuantisedAsymm8 = 2,
+ QAsymmU8 = 2,
Signed32 = 3,
Boolean = 4,
- QuantisedSymm16 = 5,
+ QSymmS16 = 5,
QuantizedSymm8PerAxis = 6,
- QSymmS8 = 7
+ QSymmS8 = 7,
+
+ QuantisedAsymm8 ARMNN_DEPRECATED_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
+ QuantisedSymm16 ARMNN_DEPRECATED_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};
enum class DataLayout
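
The deprecation mechanism above, reduced to a standalone sketch; C++17 attribute syntax is shown here, while the patch itself goes through ARMNN_DEPRECATED_MSG from the newly included Deprecated.hpp:

// Standalone sketch of the deprecated-alias pattern. Old spellings keep
// compiling with the same numeric values, but each use emits a warning
// that names the replacement.
enum class DataType
{
    QAsymmU8 = 2,
    QSymmS16 = 5,

    QuantisedAsymm8 [[deprecated("Use DataType::QAsymmU8 instead.")]] = QAsymmU8,
    QuantisedSymm16 [[deprecated("Use DataType::QSymmS16 instead.")]] = QSymmS16
};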
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 4394d9ddd9..065b6839fc 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -115,10 +115,10 @@ constexpr unsigned int GetDataTypeSize(DataType dataType)
case DataType::Float16: return 2U;
case DataType::Float32:
case DataType::Signed32: return 4U;
- case DataType::QuantisedAsymm8: return 1U;
+ case DataType::QAsymmU8: return 1U;
case DataType::QSymmS8: return 1U;
case DataType::QuantizedSymm8PerAxis: return 1U;
- case DataType::QuantisedSymm16: return 2U;
+ case DataType::QSymmS16: return 2U;
case DataType::Boolean: return 1U;
default: return 0U;
}
@@ -163,10 +163,10 @@ constexpr const char* GetDataTypeName(DataType dataType)
{
case DataType::Float16: return "Float16";
case DataType::Float32: return "Float32";
- case DataType::QuantisedAsymm8: return "QAsymm8";
- case DataType::QSymmS8: return "QSymm8";
+ case DataType::QAsymmU8: return "QAsymmU8";
+ case DataType::QSymmS8: return "QSymmS8";
case DataType::QuantizedSymm8PerAxis: return "QSymm8PerAxis";
- case DataType::QuantisedSymm16: return "QSymm16";
+ case DataType::QSymmS16: return "QSymm16";
case DataType::Signed32: return "Signed32";
case DataType::Boolean: return "Boolean";
@@ -199,9 +199,9 @@ constexpr bool IsQuantizedType()
constexpr bool IsQuantizedType(DataType dataType)
{
- return dataType == DataType::QuantisedAsymm8 ||
+ return dataType == DataType::QAsymmU8 ||
dataType == DataType::QSymmS8 ||
- dataType == DataType::QuantisedSymm16 ||
+ dataType == DataType::QSymmS16 ||
dataType == DataType::QuantizedSymm8PerAxis;
}
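
Since these helpers are constexpr, the renamed enumerators can be exercised entirely at compile time; a small sketch assuming armnn/TypesUtils.hpp as patched:

#include <armnn/TypesUtils.hpp>

// All three checks resolve at compile time; a botched rename would not build.
static_assert(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1, "one-byte payload");
static_assert(armnn::GetDataTypeSize(armnn::DataType::QSymmS16) == 2, "two-byte payload");
static_assert(armnn::IsQuantizedType(armnn::DataType::QSymmS16), "QSymmS16 is quantized");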
diff --git a/include/armnnQuantizer/INetworkQuantizer.hpp b/include/armnnQuantizer/INetworkQuantizer.hpp
index 826b077f6e..5fc5763216 100644
--- a/include/armnnQuantizer/INetworkQuantizer.hpp
+++ b/include/armnnQuantizer/INetworkQuantizer.hpp
@@ -14,7 +14,7 @@ namespace armnn
struct QuantizerOptions
{
- QuantizerOptions() : QuantizerOptions(DataType::QuantisedAsymm8, false) {}
+ QuantizerOptions() : QuantizerOptions(DataType::QAsymmU8, false) {}
QuantizerOptions(DataType activationFormat) : QuantizerOptions(activationFormat, false) {}
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index cc545a9642..bca092ca0c 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -32,7 +32,7 @@ inline bool CompatibleTypes<Half>(DataType dataType)
template<>
inline bool CompatibleTypes<uint8_t>(DataType dataType)
{
- return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
+ return dataType == DataType::Boolean || dataType == DataType::QAsymmU8;
}
template<>
@@ -44,7 +44,7 @@ inline bool CompatibleTypes<int8_t>(DataType dataType)
template<>
inline bool CompatibleTypes<int16_t>(DataType dataType)
{
- return dataType == DataType::QuantisedSymm16;
+ return dataType == DataType::QSymmS16;
}
template<>
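
CompatibleTypes<T>() is the guard that pairs a C++ payload type with a declared DataType before buffers are reinterpreted. A usage sketch (hypothetical helper; CompatibleTypes.hpp is an internal header, so this mirrors how workload code uses it rather than a public API):

// Hypothetical guard: refuse to expose a tensor buffer as T unless the
// declared DataType matches, e.g. int16_t now pairs with QSymmS16.
template<typename T>
const T* CheckedData(const armnn::ConstTensor& tensor)
{
    if (!armnn::CompatibleTypes<T>(tensor.GetInfo().GetDataType()))
    {
        throw armnn::InvalidArgumentException("Payload type does not match tensor DataType");
    }
    return static_cast<const T*>(tensor.GetMemoryArea());
}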
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index 557e72a323..e0c6b8040c 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -40,7 +40,7 @@ bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
return float16FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
case DataType::Float32:
return float32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return uint8FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
case DataType::Signed32:
return int32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 1406160914..554e2e26a5 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -101,7 +101,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
for (unsigned int i = 0; i < numOutputs; i++) {
OutputSlot& outputSlot = layer->GetOutputSlot(i);
TensorInfo info = outputSlot.GetTensorInfo();
- if (DataType::QuantisedAsymm8 == info.GetDataType()) {
+ if (DataType::QAsymmU8 == info.GetDataType()) {
if (0.f == info.GetQuantizationScale()) {
noErrors = false;
std::stringstream ss;
diff --git a/src/armnn/NetworkQuantizationScheme.hpp b/src/armnn/NetworkQuantizationScheme.hpp
index ea3c29102b..a5b7542748 100644
--- a/src/armnn/NetworkQuantizationScheme.hpp
+++ b/src/armnn/NetworkQuantizationScheme.hpp
@@ -58,7 +58,7 @@ struct QAsymm8QuantizationScheme : IQuantizationScheme
int NumBits() const override { return 8; }
- DataType GetDataType() const override { return DataType::QuantisedAsymm8; }
+ DataType GetDataType() const override { return DataType::QAsymmU8; }
};
struct QSymmS8QuantizationScheme : IQuantizationScheme
@@ -119,7 +119,7 @@ struct QSymm16QuantizationScheme : IQuantizationScheme
int NumBits() const override { return 16; }
- DataType GetDataType() const override { return DataType::QuantisedSymm16; }
+ DataType GetDataType() const override { return DataType::QSymmS16; }
};
} // namespace armnn
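
Each scheme couples a bit width with a target DataType; the parameter derivation itself is the usual range mapping. A sketch of the asymmetric uint8 case (plain illustrative struct, not armnn's OffsetScalePair; assumes min <= 0 <= max):

#include <cmath>

struct QParams { float m_Scale; int m_Offset; };

// 8 bits give 256 levels; the scale spreads [min, max] across them and the
// offset is the quantized value that represents real 0.
QParams ComputeQAsymmU8(float min, float max)
{
    const float scale = (max - min) / 255.0f;
    const int offset = static_cast<int>(std::round(-min / scale));
    return { scale, offset };
}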
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index f6d625fda3..5e00de5fcb 100644
--- a/src/armnn/NetworkQuantizer.cpp
+++ b/src/armnn/NetworkQuantizer.cpp
@@ -160,13 +160,13 @@ INetworkPtr NetworkQuantizer::ExportNetwork()
std::unique_ptr<IQuantizationScheme> quantizationScheme;
switch (m_Options.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
quantizationScheme = std::make_unique<QAsymm8QuantizationScheme>();
break;
case DataType::QSymmS8:
quantizationScheme = std::make_unique<QSymmS8QuantizationScheme>();
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
quantizationScheme = std::make_unique<QSymm16QuantizationScheme>();
break;
default:
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index a6f9ebdc42..75473b4ae6 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -36,7 +36,7 @@ ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>
BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
}
- TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QuantisedAsymm8, scale, offset);
+ TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
return ConstTensor(qInfo, backing);
}
diff --git a/src/armnn/ResolveType.hpp b/src/armnn/ResolveType.hpp
index ba3d0fca5c..c7a244dba8 100644
--- a/src/armnn/ResolveType.hpp
+++ b/src/armnn/ResolveType.hpp
@@ -27,7 +27,7 @@ struct ResolveTypeImpl<DataType::Float32>
};
template<>
-struct ResolveTypeImpl<DataType::QuantisedAsymm8>
+struct ResolveTypeImpl<DataType::QAsymmU8>
{
using Type = uint8_t;
};
@@ -39,7 +39,7 @@ struct ResolveTypeImpl<DataType::QSymmS8>
};
template<>
-struct ResolveTypeImpl<DataType::QuantisedSymm16>
+struct ResolveTypeImpl<DataType::QSymmS16>
{
using Type = int16_t;
};
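
ResolveType is the compile-time inverse of the enum: it maps an enumerator back to its payload type via these specializations. A sanity-check sketch assuming this header:

#include <cstdint>
#include <type_traits>

// ResolveType<DT> aliases ResolveTypeImpl<DT>::Type, so the renamed
// specializations map straight to the expected storage types.
static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QAsymmU8>, uint8_t>::value,
              "QAsymmU8 carries uint8_t");
static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QSymmS16>, int16_t>::value,
              "QSymmS16 carries int16_t");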
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index 171e02ad13..8eebc43cb5 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -289,7 +289,7 @@ void TensorInfo::SetQuantizationDim(const Optional<unsigned int>& quantizationDi
bool TensorInfo::IsQuantized() const
{
- return m_DataType == DataType::QuantisedAsymm8 || m_DataType == DataType::QuantisedSymm16;
+ return m_DataType == DataType::QAsymmU8 || m_DataType == DataType::QSymmS16;
}
// ---
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index cfcdb1d2ff..ada665e4e9 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -1248,43 +1248,43 @@ BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+ 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+ 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -1338,43 +1338,43 @@ BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+ 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+ 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index f6928f858f..02ce12a304 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -399,12 +399,12 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Weights and bias tensor and quantization info
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
@@ -463,17 +463,17 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Input/output tensor info and quantization info
armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
@@ -530,8 +530,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
@@ -637,8 +637,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
@@ -1361,7 +1361,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
if (biasEnabled)
{
- constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QuantisedAsymm8) ?
+ constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
armnn::DataType::Signed32 : armnn::DataType::Float32;
TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0);
@@ -1396,14 +1396,14 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
// set the tensors in the network (NHWC format)
TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
- if (dataType == armnn::DataType::QuantisedAsymm8)
+ if (dataType == armnn::DataType::QAsymmU8)
{
inputTensorInfo.SetQuantizationOffset(0);
inputTensorInfo.SetQuantizationScale(0.9f);
}
TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
- if (dataType == armnn::DataType::QuantisedAsymm8)
+ if (dataType == armnn::DataType::QAsymmU8)
{
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(0.9f);
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index d8b4e17a3c..5d1313f61f 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8));
+ BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
}
bool m_Visited = false;
@@ -411,7 +411,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32);
input->GetOutputSlot(0).SetTensorInfo(infoIn);
- armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo infoOut({3,1}, armnn::DataType::QAsymmU8);
quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
Test testQuantize;
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ba18aa31e8..e310d4f140 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -496,15 +496,15 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
{
Graph graph;
- armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QuantisedAsymm8);
- armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QuantisedAsymm8);
+ armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
+ armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
std::vector<uint8_t> anchorsVector(40);
- armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QuantisedAsymm8), anchorsVector);
+ armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
- armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QuantisedAsymm8);
- armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QuantisedAsymm8);
- armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QuantisedAsymm8);
- armnn::TensorInfo numDetectionInfo({1}, DataType::QuantisedAsymm8);
+ armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
+ armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
+ armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
+ armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index e147a84eb6..900aa1813e 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -82,17 +82,17 @@ protected:
{
switch (m_QuantizerOptions.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
TestQuantizationParamsImpl(
- info, DataType::QuantisedAsymm8, qAsymm8Params.first, qAsymm8Params.second);
+ info, DataType::QAsymmU8, qAsymm8Params.first, qAsymm8Params.second);
break;
case DataType::QSymmS8:
TestQuantizationParamsImpl(
info, DataType::QSymmS8, qSymm8Params.first, qSymm8Params.second);
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
TestQuantizationParamsImpl(
- info, DataType::QuantisedSymm16, qSymm16Params.first, qSymm16Params.second);
+ info, DataType::QSymmS16, qSymm16Params.first, qSymm16Params.second);
break;
default:
throw InvalidArgumentException("Unsupported quantization target");
@@ -106,27 +106,27 @@ protected:
void TestConstantQuantizationParams(const TensorInfo& info,
const OffsetScalePair& params,
- DataType dataType = DataType::QuantisedAsymm8)
+ DataType dataType = DataType::QAsymmU8)
{
boost::ignore_unused(dataType);
- TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second);
+ TestQuantizationParamsImpl(info, DataType::QAsymmU8, params.first, params.second);
}
void TestBiasQuantizationParams(const TensorInfo& info,
const OffsetScalePair& qAsymm8Params,
const OffsetScalePair& qSymm8Params,
const OffsetScalePair& qSymm16Params,
- DataType dataType = DataType::QuantisedAsymm8)
+ DataType dataType = DataType::QAsymmU8)
{
switch (m_QuantizerOptions.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
TestQuantizationParamsImpl(info, dataType, qAsymm8Params.first, qAsymm8Params.second);
break;
case DataType::QSymmS8:
TestQuantizationParamsImpl(info, dataType, qSymm8Params.first, qSymm8Params.second);
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
TestQuantizationParamsImpl(info, dataType, qSymm16Params.first, qSymm16Params.second);
break;
default:
@@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAddition)
TestAdditionQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestAdditionQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -399,7 +399,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -424,7 +424,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -449,7 +449,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -474,7 +474,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -524,7 +524,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
TestBoundedReluActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestBoundedReluActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -575,7 +575,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
TestTanHActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestTanHActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -636,7 +636,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
TestLeakyReLuActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestLeakyReLuActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -719,7 +719,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
TestBatchNormalizationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions QQsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions QQsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork();
TestBatchNormalizationQuantization validatorQSymm16(QQsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -784,7 +784,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestDepthToSpaceQuantization validatorQSymm16(Qsymm16Options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -944,7 +944,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled)
TestFullyConnectedQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestFullyConnectedQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1025,7 +1025,7 @@ void TestQuantizeConvolution2d(bool useBiases)
TestConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1106,7 +1106,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
TestDepthwiseConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestDepthwiseConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1177,7 +1177,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16Options)->ExportNetwork();
TestInstanceNormalizationQuantization validatorQSymm16(qSymm16Options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1241,7 +1241,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestLogSoftmaxQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1311,7 +1311,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
TestSoftmaxQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSoftmaxQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1348,7 +1348,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStandIn)
armnn::UnimplementedException);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
BOOST_CHECK_THROW(INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(),
armnn::UnimplementedException);
}
@@ -1434,7 +1434,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
TestPermuteQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPermuteQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1484,7 +1484,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
TestSpaceToBatchQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSpaceToBatchQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1536,7 +1536,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth)
TestSpaceToDepthQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSpaceToDepthQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1600,7 +1600,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
TestPooling2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPooling2dQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1666,7 +1666,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
TestConstantQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestConstantQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1721,7 +1721,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbs)
TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1804,7 +1804,7 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
TestArgMinMaxQuantization validatorQSymm8(qSymm8Options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestArgMinMaxQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1868,7 +1868,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestComparisonQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1949,7 +1949,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat)
concatLayer->GetOutputSlot(0).SetTensorInfo(info);
const QuantizerOptions qSymm8Options(DataType::QSymmS8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkQuantizerPtr quantizerPtrQAsymm8 = INetworkQuantizer::Create(network.get());
INetworkQuantizerPtr quantizerPtrQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options);
INetworkQuantizerPtr quantizerPtrQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options);
@@ -2026,7 +2026,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape)
TestReshapeQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestReshapeQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2075,7 +2075,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
TestSplitterQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSplitterQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2129,7 +2129,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
TestResizeQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestResizeQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2179,7 +2179,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
TestStridedSliceQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestStridedSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2229,7 +2229,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
TestBatchToSpaceQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestBatchToSpaceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2343,7 +2343,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
TestPreluQuantization validatorQSymm8(qSymm8Options, inputShape, alphaShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPreluQuantization validatorQSymm16(qSymm16options, inputShape, alphaShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2417,7 +2417,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestTransposeConvolution2dQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2500,7 +2500,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
TestStackQuantization validatorQSymm8(qSymm8Options, inputShape, inputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestStackQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2562,7 +2562,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice)
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2679,7 +2679,7 @@ void PreserveTypeTestImpl(const DataType& dataType)
addition->GetOutputSlot(0).SetTensorInfo(info);
QuantizerOptions options = dataType == DataType::Float32 ?
- QuantizerOptions(DataType::QuantisedAsymm8, true) : QuantizerOptions(dataType, true);
+ QuantizerOptions(DataType::QAsymmU8, true) : QuantizerOptions(dataType, true);
INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
TestPreserveType validatorQAsymm8(options, dataType, shape, shape);
@@ -2695,7 +2695,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeFloat32)
BOOST_AUTO_TEST_CASE(PreserveTypeQAsymm8)
{
- PreserveTypeTestImpl(DataType::QuantisedAsymm8);
+ PreserveTypeTestImpl(DataType::QAsymmU8);
}
BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
@@ -2705,7 +2705,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
BOOST_AUTO_TEST_CASE(PreserveTypeQsymm16)
{
- PreserveTypeTestImpl(DataType::QuantisedSymm16);
+ PreserveTypeTestImpl(DataType::QSymmS16);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 642f334575..7263cbd784 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -261,14 +261,14 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
1.0f/255,
0
));
softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QuantisedAsymm8
+ armnn::DataType::QAsymmU8
));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index 154a0bca04..3696a112cb 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -125,8 +125,8 @@ BOOST_AUTO_TEST_CASE(ModifyTensorInfo)
TensorInfo info;
info.SetShape({ 5, 6, 7, 8 });
BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- info.SetDataType(DataType::QuantisedAsymm8);
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ info.SetDataType(DataType::QAsymmU8);
+ BOOST_TEST((info.GetDataType() == DataType::QAsymmU8));
info.SetQuantizationScale(10.0f);
BOOST_TEST(info.GetQuantizationScale() == 10.0f);
info.SetQuantizationOffset(5);
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 897a35fe4c..4c371d6ed9 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -18,7 +18,7 @@ BOOST_AUTO_TEST_SUITE(Utils)
BOOST_AUTO_TEST_CASE(DataTypeSize)
{
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QuantisedAsymm8) == 1);
+ BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
}
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 6a65c6d6d5..aaf6aa9696 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -475,10 +475,12 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
switch (tensorPtr->dataType())
{
case DataType_QuantisedAsymm8:
- type = armnn::DataType::QuantisedAsymm8;
+ case DataType_QAsymmU8:
+ type = armnn::DataType::QAsymmU8;
break;
+ case DataType_QSymmS16:
case DataType_QuantisedSymm16:
- type = armnn::DataType::QuantisedSymm16;
+ type = armnn::DataType::QSymmS16;
break;
case DataType_Signed32:
type = armnn::DataType::Signed32;
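The deserializer keeps both the legacy enumerants and the renamed ones in the same switch, so archives written before the rename still load. A minimal sketch of that fall-through pattern, with hypothetical enum names standing in for the generated FlatBuffers types:

    #include <stdexcept>

    // Hypothetical stand-ins for the generated FlatBuffers enum and armnn's own
    // DataType; both the deprecated and the renamed wire value map to one type.
    enum class WireDataType { QuantisedAsymm8 = 2, QAsymmU8 = 6 };
    enum class HostDataType { QAsymmU8 };

    HostDataType ToHostType(WireDataType wire)
    {
        switch (wire)
        {
            case WireDataType::QuantisedAsymm8: // deprecated spelling, old files
            case WireDataType::QAsymmU8:        // current spelling, new files
                return HostDataType::QAsymmU8;
            default:
                throw std::invalid_argument("unsupported wire data type");
        }
    }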
diff --git a/src/armnnDeserializer/test/DeserializeActivation.cpp b/src/armnnDeserializer/test/DeserializeActivation.cpp
index 9e98893038..44765d56a0 100644
--- a/src/armnnDeserializer/test/DeserializeActivation.cpp
+++ b/src/armnnDeserializer/test/DeserializeActivation.cpp
@@ -144,7 +144,7 @@ struct SimpleActivationFixture4 : ActivationFixture
BOOST_FIXTURE_TEST_CASE(ActivationReluQuantisedAsymm8, SimpleActivationFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer", {10, 0, 2, 0}}},
{{"OutputLayer", {10, 0, 2, 0}}});
@@ -161,7 +161,7 @@ BOOST_FIXTURE_TEST_CASE(ActivationReluFloat32, SimpleActivationFixture2)
BOOST_FIXTURE_TEST_CASE(ActivationBoundedReluQuantisedAsymm8, SimpleActivationFixture3)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer", {10, 0, 2, 0}}},
{{"OutputLayer", {5, 0, 2, 0}}});
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index be292bc304..325bb6e1c3 100644
--- a/src/armnnDeserializer/test/DeserializeAdd.cpp
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -145,7 +145,7 @@ struct SimpleAddFixture2 : AddFixture
BOOST_FIXTURE_TEST_CASE(AddQuantisedAsymm8, SimpleAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer1", { 0, 1, 2, 3 }},
{"InputLayer2", { 4, 5, 6, 7 }}},
diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp
index 6616398b73..9881b9e61e 100644
--- a/src/armnnDeserializer/test/DeserializeComparison.cpp
+++ b/src/armnnDeserializer/test/DeserializeComparison.cpp
@@ -242,11 +242,21 @@ DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less, Float32)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, Float32)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, Float32)
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, QuantisedAsymm8)
+ARMNN_NO_DEPRECATE_WARN_END
+
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, QAsymmU8)
BOOST_AUTO_TEST_SUITE_END()
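The deprecated instantiations stay in the suite but are fenced with ARMNN_NO_DEPRECATE_WARN_BEGIN/END so they keep compiling under -Werror. A sketch of how such a macro pair is commonly implemented, assuming GCC/Clang-style pragmas rather than copying armnn's actual definition:

    // Common shape of a deprecation-suppression pair for GCC/Clang; this is an
    // assumption about the macros' implementation, not a copy of armnn's.
    #define SKETCH_NO_DEPRECATE_WARN_BEGIN \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
    #define SKETCH_NO_DEPRECATE_WARN_END \
        _Pragma("GCC diagnostic pop")

    [[deprecated("Use QAsymmU8")]] inline void OldApi() {}

    inline void CallSite()
    {
        SKETCH_NO_DEPRECATE_WARN_BEGIN
        OldApi(); // compiles cleanly even with -Werror=deprecated-declarations
        SKETCH_NO_DEPRECATE_WARN_END
    }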
diff --git a/src/armnnDeserializer/test/DeserializeConstant.cpp b/src/armnnDeserializer/test/DeserializeConstant.cpp
index 0abe5e6ca1..cb0e4ad42f 100644
--- a/src/armnnDeserializer/test/DeserializeConstant.cpp
+++ b/src/armnnDeserializer/test/DeserializeConstant.cpp
@@ -143,7 +143,7 @@ struct SimpleConstantAddFixture : ConstantAddFixture
BOOST_FIXTURE_TEST_CASE(SimpleConstantAddQuantisedAsymm8, SimpleConstantAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 1, 2, 3, 4, 5, 6 },
{ 2, 4, 6, 8, 10, 12 });
diff --git a/src/armnnDeserializer/test/DeserializeDivision.cpp b/src/armnnDeserializer/test/DeserializeDivision.cpp
index dc6f5820cf..d12b043ce0 100644
--- a/src/armnnDeserializer/test/DeserializeDivision.cpp
+++ b/src/armnnDeserializer/test/DeserializeDivision.cpp
@@ -141,7 +141,7 @@ struct SimpleDivisionFixture2 : DivisionFixture
BOOST_FIXTURE_TEST_CASE(DivisionQuantisedAsymm8, SimpleDivisionFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer1", { 0, 5, 24, 21 }},
{"InputLayer2", { 4, 1, 6, 7 }}},
diff --git a/src/armnnDeserializer/test/DeserializeFullyConnected.cpp b/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
index 77d0acc782..90698cb993 100644
--- a/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
+++ b/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
@@ -131,7 +131,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer", { 10, 20, 30, 40 }}},
{{"OutputLayer", { 400/2 }}});
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index f784ba6f31..c0bb13e17b 100644
--- a/src/armnnDeserializer/test/DeserializeMultiplication.cpp
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -145,7 +145,7 @@ struct SimpleMultiplicationFixture2 : MultiplicationFixture
BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer1", { 0, 1, 2, 3 }},
{"InputLayer2", { 4, 5, 6, 7 }}},
diff --git a/src/armnnDeserializer/test/DeserializePad.cpp b/src/armnnDeserializer/test/DeserializePad.cpp
index b18710a381..d9087d963e 100644
--- a/src/armnnDeserializer/test/DeserializePad.cpp
+++ b/src/armnnDeserializer/test/DeserializePad.cpp
@@ -112,7 +112,7 @@ struct SimplePadFixture : PadFixture
BOOST_FIXTURE_TEST_CASE(SimplePadQuantisedAsymm8, SimplePadFixture)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<3, armnn::DataType::QAsymmU8>(0,
{
0, 4, 2, 5, 6, 1, 5, 2
},
diff --git a/src/armnnDeserializer/test/DeserializePermute.cpp b/src/armnnDeserializer/test/DeserializePermute.cpp
index 6d08b5fee9..be50a67328 100644
--- a/src/armnnDeserializer/test/DeserializePermute.cpp
+++ b/src/armnnDeserializer/test/DeserializePermute.cpp
@@ -112,7 +112,7 @@ struct SimplePermute2DFixture : PermuteFixture
BOOST_FIXTURE_TEST_CASE(SimplePermute2DQuantisedAsymm8, SimplePermute2DFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6 },
{ 1, 4, 2, 5, 3, 6 });
}
@@ -127,7 +127,7 @@ struct SimplePermute4DFixture : PermuteFixture
BOOST_FIXTURE_TEST_CASE(SimplePermute4DQuantisedAsymm8, SimplePermute4DFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<4, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
{ 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22,
diff --git a/src/armnnDeserializer/test/DeserializePooling2d.cpp b/src/armnnDeserializer/test/DeserializePooling2d.cpp
index 55fb655b34..b63aeb5fdc 100644
--- a/src/armnnDeserializer/test/DeserializePooling2d.cpp
+++ b/src/armnnDeserializer/test/DeserializePooling2d.cpp
@@ -141,7 +141,7 @@ BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32Avg, SimpleAvgPooling2dFixture)
BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Avg, SimpleAvgPooling2dFixture2)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<4, armnn::DataType::QAsymmU8>(0,
{ 20, 40, 60, 80 },
{ 50 });
}
@@ -153,7 +153,7 @@ BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32Max, SimpleMaxPooling2dFixture)
BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Max, SimpleMaxPooling2dFixture2)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<4, armnn::DataType::QAsymmU8>(0,
{ 20, 40, 60, 80 },
{ 80 });
}
diff --git a/src/armnnDeserializer/test/DeserializeReshape.cpp b/src/armnnDeserializer/test/DeserializeReshape.cpp
index 301d8986c0..554b867db7 100644
--- a/src/armnnDeserializer/test/DeserializeReshape.cpp
+++ b/src/armnnDeserializer/test/DeserializeReshape.cpp
@@ -112,7 +112,7 @@ struct SimpleReshapeFixture2 : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ReshapeQuantisedAsymm8, SimpleReshapeFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
}
diff --git a/src/armnnDeserializer/test/DeserializeSubtraction.cpp b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
index 5058bb840d..a4bd0fbeb4 100644
--- a/src/armnnDeserializer/test/DeserializeSubtraction.cpp
+++ b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
@@ -148,7 +148,7 @@ struct SimpleSubtractionFixtureBroadcast : SubtractionFixture
BOOST_FIXTURE_TEST_CASE(SubtractionQuantisedAsymm8, SimpleSubtractionFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputLayer1", { 4, 5, 6, 7 }},
{"inputLayer2", { 3, 2, 1, 0 }}},
diff --git a/src/armnnQuantizer/ArmNNQuantizerMain.cpp b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
index 227a105bba..30167e73f2 100644
--- a/src/armnnQuantizer/ArmNNQuantizerMain.cpp
+++ b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
@@ -37,8 +37,8 @@ int main(int argc, char* argv[])
armnn::QuantizerOptions quantizerOptions;
quantizerOptions.m_ActivationFormat = cmdline.GetQuantizationScheme() == "QSymm16"
- ? armnn::DataType::QuantisedSymm16
- : armnn::DataType::QuantisedAsymm8;
+ ? armnn::DataType::QSymmS16
+ : armnn::DataType::QAsymmU8;
quantizerOptions.m_PreserveType = cmdline.HasPreservedDataType();
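The tool derives the activation format from the command line: only the exact scheme string "QSymm16" selects 16-bit symmetric quantization, everything else falls back to 8-bit asymmetric. The same selection extracted into a helper, as a sketch (helper name is illustrative):

    #include <string>
    #include <armnn/Types.hpp>

    // Illustrative helper mirroring the selection above: only "QSymm16" opts
    // into 16-bit symmetric activations; anything else gets 8-bit asymmetric.
    inline armnn::DataType ActivationFormatFromScheme(const std::string& scheme)
    {
        return scheme == "QSymm16" ? armnn::DataType::QSymmS16
                                   : armnn::DataType::QAsymmU8;
    }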
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index bad95cfc56..0d30d96452 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -30,10 +30,12 @@ enum ArgMinMaxFunction : byte {
enum DataType : byte {
Float16 = 0,
Float32 = 1,
- QuantisedAsymm8 = 2,
+ QuantisedAsymm8 = 2, // deprecated
Signed32 = 3,
Boolean = 4,
- QuantisedSymm16 = 5
+ QuantisedSymm16 = 5, // deprecated
+ QAsymmU8 = 6,
+ QSymmS16 = 7
}
enum DataLayout : byte {
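The schema keeps the deprecated enumerants at their original wire values (2 and 5) and introduces the renamed types under fresh values (6 and 7); renumbering would silently reinterpret previously serialized files. The same stability rule, illustrated as a C++ enum mirroring the schema:

    #include <cstdint>

    // Wire-format enums must never renumber: serialized files store the raw
    // byte, so deprecated names keep their values and new names get new ones.
    enum class DataTypeWire : int8_t
    {
        Float16         = 0,
        Float32         = 1,
        QuantisedAsymm8 = 2, // deprecated spelling; the value 2 must stay
        Signed32        = 3,
        Boolean         = 4,
        QuantisedSymm16 = 5, // deprecated spelling; the value 5 must stay
        QAsymmU8        = 6, // renamed type under a fresh value
        QSymmS16        = 7
    };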
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 608a9c3480..be6fa64b0a 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1405,7 +1405,7 @@ flatbuffers::Offset<serializer::ConstTensor>
fbPayload = flatBuffersData.o;
break;
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
@@ -1414,7 +1414,7 @@ flatbuffers::Offset<serializer::ConstTensor>
fbPayload = flatBuffersData.o;
break;
}
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
default:
{
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 908da6450c..df1ef285de 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -36,9 +36,9 @@ armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType da
case armnn::DataType::Signed32:
return armnnSerializer::ConstTensorData::ConstTensorData_IntData;
case armnn::DataType::Float16:
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return armnnSerializer::ConstTensorData::ConstTensorData_ShortData;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
return armnnSerializer::ConstTensorData::ConstTensorData_ByteData;
default:
@@ -56,10 +56,10 @@ armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
return armnnSerializer::DataType::DataType_Float16;
case armnn::DataType::Signed32:
return armnnSerializer::DataType::DataType_Signed32;
- case armnn::DataType::QuantisedSymm16:
- return armnnSerializer::DataType::DataType_QuantisedSymm16;
- case armnn::DataType::QuantisedAsymm8:
- return armnnSerializer::DataType::DataType_QuantisedAsymm8;
+ case armnn::DataType::QSymmS16:
+ return armnnSerializer::DataType::DataType_QSymmS16;
+ case armnn::DataType::QAsymmU8:
+ return armnnSerializer::DataType::DataType_QAsymmU8;
case armnn::DataType::Boolean:
return armnnSerializer::DataType::DataType_Boolean;
default:
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 8dfca3c52e..3e67cf062f 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -206,7 +206,7 @@ void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTen
CompareConstTensorData<const float*>(
tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
break;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
CompareConstTensorData<const uint8_t*>(
tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
@@ -770,7 +770,7 @@ BOOST_AUTO_TEST_CASE(SerializeDequantize)
DECLARE_LAYER_VERIFIER_CLASS(Dequantize)
const std::string layerName("dequantize");
- const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
+ const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
armnn::INetworkPtr network = armnn::INetwork::Create();
@@ -1152,8 +1152,8 @@ BOOST_AUTO_TEST_CASE(SerializeGather)
};
const std::string layerName("gather");
- armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QuantisedAsymm8);
- armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8);
+ armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8);
const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
paramsInfo.SetQuantizationScale(1.0f);
@@ -3994,7 +3994,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape inputToInputWeightsShape = {4, 2};
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
@@ -4002,7 +4002,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape inputToForgetWeightsShape = {4, 2};
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
@@ -4010,7 +4010,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape inputToCellWeightsShape = {4, 2};
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
@@ -4018,7 +4018,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape inputToOutputWeightsShape = {4, 2};
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
@@ -4027,7 +4027,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
@@ -4035,7 +4035,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
@@ -4043,7 +4043,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
@@ -4051,7 +4051,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
@@ -4114,15 +4114,15 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
// Connect up
armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 9c7dda8aec..22d65645a3 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -309,7 +309,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
switch (tensorPtr->type)
{
case tflite::TensorType_UINT8:
- type = armnn::DataType::QuantisedAsymm8;
+ type = armnn::DataType::QAsymmU8;
break;
case tflite::TensorType_FLOAT32:
type = armnn::DataType::Float32;
@@ -318,7 +318,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
type = armnn::DataType::QSymmS8;
break;
case tflite::TensorType_INT16:
- type = armnn::DataType::QuantisedSymm16;
+ type = armnn::DataType::QSymmS16;
break;
case tflite::TensorType_INT32:
type = armnn::DataType::Signed32;
@@ -2818,7 +2818,7 @@ TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
tensorPtr,
tensorInfo,
permutationVector);
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
tensorPtr,
tensorInfo,
diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp
index 94389d3134..deeb707a2f 100644
--- a/src/armnnTfLiteParser/test/Addition.cpp
+++ b/src/armnnTfLiteParser/test/Addition.cpp
@@ -97,7 +97,7 @@ struct SimpleAddFixture : AddFixture
BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
index a39c088d44..a56e7e7362 100644
--- a/src/armnnTfLiteParser/test/AvgPool2D.cpp
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -98,7 +98,7 @@ struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 });
+ RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
@@ -108,13 +108,13 @@ BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutpu
BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
}
BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp
index d3d571f174..8e31a3edb0 100644
--- a/src/armnnTfLiteParser/test/Concatenation.cpp
+++ b/src/armnnTfLiteParser/test/Concatenation.cpp
@@ -100,7 +100,7 @@ struct ConcatenationFixtureNegativeDim : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
@@ -114,7 +114,7 @@ struct ConcatenationFixtureNCHW : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
@@ -128,7 +128,7 @@ struct ConcatenationFixtureNHWC : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
@@ -142,7 +142,7 @@ struct ConcatenationFixtureDim1 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
@@ -161,7 +161,7 @@ struct ConcatenationFixtureDim3 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3,
4, 5, 6, 7,
@@ -196,7 +196,7 @@ struct ConcatenationFixture3DDim0 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim0, ConcatenationFixture3DDim0)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3, 4, 5 } },
{ "inputTensor2", { 6, 7, 8, 9, 10, 11,
@@ -213,7 +213,7 @@ struct ConcatenationFixture3DDim1 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim1, ConcatenationFixture3DDim1)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3, 4, 5 } },
{ "inputTensor2", { 6, 7, 8, 9, 10, 11,
@@ -230,7 +230,7 @@ struct ConcatenationFixture3DDim2 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim2, ConcatenationFixture3DDim2)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2,
3, 4, 5 } },
diff --git a/src/armnnTfLiteParser/test/Constant.cpp b/src/armnnTfLiteParser/test/Constant.cpp
index 356e1b769d..cc89223469 100644
--- a/src/armnnTfLiteParser/test/Constant.cpp
+++ b/src/armnnTfLiteParser/test/Constant.cpp
@@ -103,7 +103,7 @@ struct SimpleConstantAddFixture : ConstantAddFixture
BOOST_FIXTURE_TEST_CASE(SimpleConstantAdd, SimpleConstantAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputTensor", { 0, 1, 2, 3 }}},
{{"OutputTensor", { 4, 6, 8, 10 }}}
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 38c6675ddb..2eae5f5a1a 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -89,7 +89,7 @@ struct SimpleConv2DFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2, 3,
@@ -219,7 +219,7 @@ struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
@@ -290,7 +290,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture
uint8_t outZero = 20;
uint8_t fz = 4; // filter zero point
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
@@ -331,7 +331,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixtu
{
uint8_t relu6Min = 6 / 2; // divide by output scale
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index c0767801b3..2bf08fa79f 100644
--- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -133,7 +133,7 @@ struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ 0, 1, 2,
3, 4, 5,
@@ -160,7 +160,7 @@ struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ 0, 1, 2,
3, 4, 5,
@@ -185,7 +185,7 @@ struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ 0, 1, 2,
3, 4, 5,
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
index 2f98c07a66..79dfe2e26a 100644
--- a/src/armnnTfLiteParser/test/Dequantize.cpp
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::Float32>(
0,
{{"inputTensor", { 0u, 1u, 5u, 100u, 200u, 255u }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}});
@@ -97,7 +97,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQsymm16, SimpleDequantizeFixtureQSymm16)
{
- RunTest<2, armnn::DataType::QuantisedSymm16 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QSymmS16, armnn::DataType::Float32>(
0,
{{"inputTensor", { 0, 1, 5, 32767, -1, -32768 }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 1ec87f97d5..f12b2b94d6 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -220,7 +220,7 @@ BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCus
{ "num_detections", numDetections}
};
- RunTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float32>(0, input, output);
+ RunTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(0, input, output);
}
BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPostProcessCustomOptions)
@@ -288,8 +288,8 @@ BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPo
BOOST_TEST(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
// Check the connections
- armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QuantisedAsymm8, 1, 1);
- armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QuantisedAsymm8,
+ armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QAsymmU8, 1, 1);
+ armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QAsymmU8,
0.00999999978f, 0);
armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32, 0, 0);
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index 54d7bcb1dc..d1223d5af2 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -125,7 +125,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 10, 20, 30, 40 },
{ 400/2 });
@@ -145,7 +145,7 @@ struct FullyConnectedWithBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 10, 20, 30, 40 },
{ (400+10)/2 });
@@ -165,7 +165,7 @@ struct FullyConnectedWithBiasMultipleOutputsFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithBiasMultipleOutputs, FullyConnectedWithBiasMultipleOutputsFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 1, 2, 3, 4, 10, 20, 30, 40 },
{ (40+10)/2, (400+10)/2 });
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
index 759fc37ccd..8cbef97e2f 100644
--- a/src/armnnTfLiteParser/test/MaxPool2D.cpp
+++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp
@@ -98,7 +98,7 @@ struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 });
+ RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
@@ -108,13 +108,13 @@ BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutpu
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index 62fbad6953..6ed568ceff 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -86,7 +86,7 @@ struct ReshapeFixtureWithReshapeDims : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
@@ -101,7 +101,7 @@ struct ReshapeFixtureWithReshapeDimsFlatten : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<1, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<1, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
@@ -116,7 +116,7 @@ struct ReshapeFixtureWithReshapeDimsFlattenTwoDims : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
@@ -131,7 +131,7 @@ struct ReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<3, armnn::DataType::QAsymmU8>(0,
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
index dacd946352..c4d19f2ac0 100644
--- a/src/armnnTfLiteParser/test/Softmax.cpp
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -71,7 +71,7 @@ struct SoftmaxFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+ RunTest<2, armnn::DataType::QAsymmU8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Split.cpp b/src/armnnTfLiteParser/test/Split.cpp
index 977bd7b6a4..5f23799fd6 100644
--- a/src/armnnTfLiteParser/test/Split.cpp
+++ b/src/armnnTfLiteParser/test/Split.cpp
@@ -179,7 +179,7 @@ struct SimpleSplitFixtureUint8 : SplitFixture
BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoUint8, SimpleSplitFixtureUint8)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 } } },
@@ -196,7 +196,7 @@ struct SimpleSplitAxisThreeFixtureUint8 : SplitFixture
BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoUint8, SimpleSplitAxisThreeFixtureUint8)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 } } },
@@ -213,7 +213,7 @@ struct SimpleSplit2DFixtureUint8 : SplitFixture
BOOST_FIXTURE_TEST_CASE(SimpleSplit2DUint8, SimpleSplit2DFixtureUint8)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8 } } },
{ {"outputTensor1", { 1, 2, 3, 4 } },
@@ -229,7 +229,7 @@ struct SimpleSplit3DFixtureUint8 : SplitFixture
BOOST_FIXTURE_TEST_CASE(SimpleSplit3DUint8, SimpleSplit3DFixtureUint8)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 } } },
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
index 13261facf1..86a1966dd1 100644
--- a/src/armnnTfLiteParser/test/Squeeze.cpp
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -85,7 +85,7 @@ struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<3, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2,1})));
@@ -99,7 +99,7 @@ struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<2, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2})));
}
diff --git a/src/armnnTfLiteParser/test/Sub.cpp b/src/armnnTfLiteParser/test/Sub.cpp
index 0a3f58b519..6a251a5f74 100644
--- a/src/armnnTfLiteParser/test/Sub.cpp
+++ b/src/armnnTfLiteParser/test/Sub.cpp
@@ -97,7 +97,7 @@ struct SimpleSubFixture : SubFixture
BOOST_FIXTURE_TEST_CASE(SimpleSub, SimpleSubFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 4, 5, 6, 7 }},
{"inputTensor2", { 3, 2, 1, 0 }}},
diff --git a/src/armnnTfLiteParser/test/TransposeConv.cpp b/src/armnnTfLiteParser/test/TransposeConv.cpp
index 46b02ac956..084a286dbd 100644
--- a/src/armnnTfLiteParser/test/TransposeConv.cpp
+++ b/src/armnnTfLiteParser/test/TransposeConv.cpp
@@ -118,7 +118,7 @@ struct SimpleTransposeConvFixture : TransposeConvFixture
BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConv, SimpleTransposeConvFixture )
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
index 04fd50dc39..4fcd74f585 100644
--- a/src/armnnTfLiteParser/test/Unpack.cpp
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -126,7 +126,7 @@ BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxi
BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecifiedUint8, DefaultUnpackAxisZeroUint8Fixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12,
@@ -165,7 +165,7 @@ BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
}
BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fixture) {
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor", { 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12,
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index 9a9cdc5156..39dee679fd 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -194,7 +194,7 @@ private:
switch (dataType)
{
case DataType::Float32: return "FLOAT32";
- case DataType::QuantisedAsymm8: return "UINT8";
+ case DataType::QAsymmU8: return "UINT8";
default: return "UNKNOWN";
}
}
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 9250b61ec9..1cad92f58a 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -23,9 +23,9 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType)
return arm_compute::DataType::F16;
case armnn::DataType::Float32:
return arm_compute::DataType::F32;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return arm_compute::DataType::QASYMM8;
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return arm_compute::DataType::QSYMM16;
case armnn::DataType::QSymmS8:
return arm_compute::DataType::QSYMM8;
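At the Arm Compute Library boundary the rename is purely cosmetic: QAsymmU8 still selects QASYMM8 kernels and QSymmS16 still selects QSYMM16, so kernel selection is unaffected. A condensed sketch, with unsupported types collapsed into UNKNOWN for brevity:

    #include <arm_compute/core/Types.h>
    #include <armnn/Types.hpp>

    // Condensed sketch: the renamed armnn types map to the same ACL types as
    // before this change.
    inline arm_compute::DataType ToAclType(armnn::DataType type)
    {
        switch (type)
        {
            case armnn::DataType::QAsymmU8: return arm_compute::DataType::QASYMM8;
            case armnn::DataType::QSymmS16: return arm_compute::DataType::QSYMM16;
            case armnn::DataType::QSymmS8:  return arm_compute::DataType::QSYMM8;
            default:                        return arm_compute::DataType::UNKNOWN;
        }
    }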
diff --git a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
index 1e2f0db600..4ab748806c 100644
--- a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
+++ b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
@@ -15,7 +15,7 @@ BOOST_AUTO_TEST_CASE(BuildArmComputeTensorInfoTest)
{
const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
- const armnn::DataType dataType = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType dataType = armnn::DataType::QAsymmU8;
const std::vector<float> quantScales = { 1.5f, 2.5f, 3.5f, 4.5f };
const float quantScale = quantScales[0];
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index 08189f9999..d8b6af8a30 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -23,9 +23,9 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::Float16:
case armnn::DataType::Float32:
return weightsType;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return armnn::DataType::Signed32;
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 9d8174ce7d..75db73c32f 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -56,13 +56,13 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
return MakeWorkloadForType<Float16Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Float32:
return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Signed32:
return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Boolean:
return MakeWorkloadForType<BooleanWorkload>::Func(descriptor, info, std::forward<Args>(args)...);
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
return nullptr;
default:
BOOST_ASSERT_MSG(false, "Unknown DataType.");
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index e03068618b..f7895a6f1d 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -172,7 +172,7 @@ template <typename QueueDescriptor>
using Float32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Float32>;
template <typename QueueDescriptor>
-using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QuantisedAsymm8>;
+using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QAsymmU8>;
template <typename QueueDescriptor>
using Int32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Signed32>;
@@ -187,7 +187,7 @@ using BaseFloat32ComparisonWorkload = MultiTypedWorkload<QueueDescriptor,
template <typename QueueDescriptor>
using BaseUint8ComparisonWorkload = MultiTypedWorkload<QueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
armnn::DataType::Boolean>;
template <typename QueueDescriptor>
@@ -202,7 +202,7 @@ using Float32ToFloat16Workload = MultiTypedWorkload<QueueDescriptor,
template <typename QueueDescriptor>
using Uint8ToFloat32Workload = MultiTypedWorkload<QueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
armnn::DataType::Float32>;
} //namespace armnn
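These aliases pin a workload to a single data type, so only the enumerator behind Uint8Workload changes and call sites keep compiling unmodified. A simplified stand-in (not armnn's actual TypedWorkload) showing how such an alias can enforce its type at construction:

    #include <cassert>

    enum class SketchDataType { Float32, QAsymmU8 };

    // Simplified stand-in for TypedWorkload: the alias fixes the expected data
    // type at compile time; the constructor checks the runtime type against it.
    template <typename Descriptor, SketchDataType Expected>
    struct TypedWorkloadSketch
    {
        explicit TypedWorkloadSketch(SketchDataType actual)
        {
            assert(actual == Expected && "workload created for the wrong data type");
        }
    };

    struct SomeDescriptor {};
    using Uint8WorkloadSketch = TypedWorkloadSketch<SomeDescriptor, SketchDataType::QAsymmU8>;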
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index c3dd601fbd..fa5c6fe38e 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -30,9 +30,9 @@ DataType GetBiasDataType(DataType inputDataType)
return DataType::Float16;
case DataType::Float32:
return DataType::Float32;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return DataType::Signed32;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
return DataType::Signed32;
default:
BOOST_ASSERT_MSG(false, "Invalid input data type");
@@ -342,11 +342,11 @@ void ValidateWeightDataType(const TensorInfo& inputInfo,
const std::string& descName)
{
const DataType inputType = inputInfo.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
const std::vector<DataType> validTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
@@ -403,7 +403,7 @@ void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
const DataType outputDataType = outputInfo.GetDataType();
const bool canHavePerAxisQuantization =
- inputDataType == DataType::QuantisedAsymm8 && inputDataType == outputDataType;
+ inputDataType == DataType::QAsymmU8 && inputDataType == outputDataType;
if (!canHavePerAxisQuantization)
{
@@ -580,8 +580,8 @@ void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -608,8 +608,8 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Signed32
};
@@ -665,8 +665,8 @@ void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -687,8 +687,8 @@ void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
@@ -823,8 +823,8 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
@@ -910,8 +910,8 @@ void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
@@ -971,8 +971,8 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -994,8 +994,8 @@ void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1019,8 +1019,8 @@ void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
@@ -1053,8 +1053,8 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
@@ -1087,8 +1087,8 @@ void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInf
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1159,8 +1159,8 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
@@ -1232,8 +1232,8 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
@@ -1287,8 +1287,8 @@ void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1312,8 +1312,8 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1359,8 +1359,8 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1460,8 +1460,8 @@ void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1511,8 +1511,8 @@ void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::Float16,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
@@ -1536,8 +1536,8 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::Float16,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1601,8 +1601,8 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1626,8 +1626,8 @@ void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1674,7 +1674,7 @@ void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1705,7 +1705,7 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
// Check for a supported type on one input, then match it against all the other inputs and outputs
@@ -2016,8 +2016,8 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
@@ -2047,8 +2047,8 @@ void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
@@ -2080,8 +2080,8 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Float32,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2110,8 +2110,8 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
// First check if input tensor data type is supported, then
@@ -2178,9 +2178,9 @@ void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
- if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
+ if (outputTensorInfo.GetDataType() != DataType::QAsymmU8 &&
outputTensorInfo.GetDataType() != DataType::QSymmS8 &&
- outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
+ outputTensorInfo.GetDataType() != DataType::QSymmS16)
{
throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
}
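The check admits exactly the three quantized output types; the values such an output carries come from affine quantization, q = clamp(round(x / scale) + offset). A sketch for the QAsymmU8 case:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Affine quantization for QAsymmU8: q = clamp(round(x / scale) + offset, 0, 255).
    inline uint8_t QuantizeU8(float x, float scale, int32_t offset)
    {
        int32_t q = static_cast<int32_t>(std::lround(x / scale)) + offset;
        return static_cast<uint8_t>(std::max(0, std::min(q, 255)));
    }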
@@ -2200,8 +2200,8 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2222,8 +2222,8 @@ void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2279,8 +2279,8 @@ void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Float32,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2367,8 +2367,8 @@ void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2395,8 +2395,8 @@ void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2441,8 +2441,8 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
@@ -2531,8 +2531,8 @@ void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2574,8 +2574,8 @@ void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2636,8 +2636,8 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2662,17 +2662,17 @@ void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
std::vector<DataType> inputOutputSupportedTypes =
{
- DataType::QuantisedAsymm8
+ DataType::QAsymmU8
};
std::vector<DataType> cellStateSupportedTypes =
{
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
std::vector<DataType> weightsSupportedTypes =
{
- DataType::QuantisedAsymm8
+ DataType::QAsymmU8
};
std::vector<DataType> biasSupportedTypes =
@@ -2831,8 +2831,8 @@ void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2911,8 +2911,8 @@ void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
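
[Annotation] Every validator hunk above is the same mechanical substitution, so all enumerator uses have to move in lockstep unless the old names survive as deprecated aliases. The include/armnn/Types.hpp side of the patch is not part of this excerpt, so the sketch below is illustrative only: it shows one standard C++17 way such a rename stays source-compatible, with the old enumerators kept as deprecated aliases of the new ones.

enum class DataType
{
    Float16,
    Float32,
    QAsymmU8,   // unsigned, asymmetric, 8-bit (was QuantisedAsymm8)
    Signed32,
    QSymmS16,   // signed, symmetric, 16-bit (was QuantisedSymm16)

    // Deprecated aliases keep old call sites compiling during migration.
    QuantisedAsymm8 [[deprecated("Use DataType::QAsymmU8 instead.")]] = QAsymmU8,
    QuantisedSymm16 [[deprecated("Use DataType::QSymmS16 instead.")]] = QSymmS16
};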
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index a4327e441a..54ae585a82 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -347,8 +347,8 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
biasInfoPtr = &dummyFloat32Bias;
break;
}
- case DataType::QuantisedAsymm8:
- case DataType::QuantisedSymm16:
+ case DataType::QAsymmU8:
+ case DataType::QSymmS16:
{
biasInfoPtr = &dummyQA8Bias;
break;
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 1a45a9f6e9..cb1f7c117a 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -164,7 +164,7 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
weightPermuted =
ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer);
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
break;
case DataType::QuantizedSymm8PerAxis:
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index d6f589fa00..4b9bf7a711 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -88,7 +88,7 @@ inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
{
- TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);
+ TensorInfo commonTensorInfo({ 2, 3 }, DataType::QAsymmU8);
const float scale = 0.023529f;
const int8_t offset = -43;
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 6924beb820..031210f1fc 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -316,22 +316,22 @@ struct DummyLayer<armnn::QuantizedLstmLayer, void>
m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 9273a7910f..850a4d37ee 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -147,12 +147,12 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// set the tensors in the network
- TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
inputTensorInfo.SetQuantizationOffset(100);
inputTensorInfo.SetQuantizationScale(10000.0f);
input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(1.0f / 256.0f);
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
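
[Annotation] The quantization parameters set above follow the usual affine mapping for QAsymmU8, real = scale * (q - offset); the softmax output's scale of 1/256 with offset 0 spans [0, 255/256], matching softmax's [0, 1) output range. A minimal sketch of that mapping (hypothetical helper, not an armnn API):

#include <cstdint>

// Affine dequantization as used by QAsymmU8 tensors: with the softmax
// output parameters above (scale = 1/256, offset = 0), q = 128 -> 0.5.
float Dequantize(uint8_t q, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(q) - offset);
}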
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 08f696812e..eb1b976656 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -172,9 +172,9 @@ BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
float outputQScale = 2.0f;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
- DataType::QuantisedAsymm8, inputsQScale, 0));
+ DataType::QAsymmU8, inputsQScale, 0));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
- GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
+ GetBiasDataType(DataType::QAsymmU8), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -183,8 +183,8 @@ BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
- Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
+ Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
// check the constants that they are not NULL
BOOST_CHECK(layer->m_Weight != nullptr);
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index ca3c563757..162cc8436c 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -207,8 +207,8 @@ void CheckUntouchedSubgraph(const SubgraphView& untouchedSubgraph,
// Creates a subgraph containing only a single unsupported layer (only convolutions are unsupported by the mock backend)
SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
Pooling2dDescriptor poolingDescriptor;
poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
@@ -242,8 +242,8 @@ SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph1(Graph& graph, Layer
// Creates a subgraph containing only unsupported layers (only convolutions are unsupported by the mock backend)
SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
Pooling2dDescriptor poolingDescriptor;
poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
@@ -285,9 +285,9 @@ SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph2(Graph& graph, Layer
// Creates a simple subgraph with only one convolution layer, supported by the mock backend
SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
@@ -315,9 +315,9 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, Layer
// Creates a subgraph with five convolutions layers, all supported by the mock backend
SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
@@ -362,9 +362,9 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, Layer
// (only convolutions are unsupported by the mock backend)
SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
@@ -421,9 +421,9 @@ SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, Laye
// Creates a subgraph with only unoptimizable layers ("unoptimizable" is added to the layer's name)
SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
@@ -452,9 +452,9 @@ SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, Lay
// Creates a subgraph with some unoptimizable layers ("unoptimizable" is added to the layer's name)
SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
@@ -501,9 +501,9 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, L
// this is meant to test input slots coming from different layers
SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index 609773ce89..cbba666004 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -43,12 +43,12 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
int32_t cellStateOffset = 0;
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
@@ -121,27 +121,27 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(1);
armnn::TensorInfo inputTensorInfo({batchSize , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInTensorInfo({batchSize , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateOutTensorInfo({batchSize, outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputTensorInfo({batchSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
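
[Annotation] Note the split the new names make visible: activations and output state are QAsymmU8, while both cell-state tensors are QSymmS16, giving the cell state extra precision around zero. Symmetric quantization fixes the zero point at 0, which is why cellStateOffset above is 0 rather than a free parameter. A one-line sketch (hypothetical helper):

#include <cstdint>

// QSymmS16 dequantization: symmetric, so real = scale * q with no offset.
float DequantizeSymmS16(int16_t q, float scale)
{
    return scale * q;
}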
@@ -178,23 +178,23 @@ IsCloseEnough(T value1, T value2, T tolerance)
void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
std::vector<uint8_t> inputVector = {166, 179, 50, 150};
- armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);
std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
- armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QuantisedSymm16);
+ armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);
std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
- armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);
std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
- armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QuantisedSymm16);
+ armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);
std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
- armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);
// Builds up the structure of the network
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index b5acd88e89..3c47eab01f 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -615,7 +615,7 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
const TensorShape biasShape { cOutput };
- constexpr DataType inputType = DataType::QuantisedAsymm8;
+ constexpr DataType inputType = DataType::QAsymmU8;
constexpr DataType weightType = DataType::QuantizedSymm8PerAxis;
constexpr DataType biasType = DataType::Signed32;
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index 552eab2cae..0b0f265db4 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -98,9 +98,9 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::Float16:
case armnn::DataType::Float32:
return weightsType;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return armnn::DataType::Signed32;
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
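
[Annotation] Both quantized weight types map to Signed32 bias because int8/int16 multiply-accumulates are carried in an int32 accumulator, and the bias is added before requantization to the output type. For that to work the bias scale is conventionally inputScale * weightScale. A hedged sketch of building such a bias info (hypothetical helper over the real TensorInfo API):

#include <armnn/Tensor.hpp>

// Bias for a quantized layer: Signed32 payload, scale tied to the
// input and weight scales so it can be added to the int32 accumulator.
armnn::TensorInfo MakeBiasInfo(const armnn::TensorInfo& input,
                               const armnn::TensorInfo& weights,
                               unsigned int outputChannels)
{
    const float biasScale =
        input.GetQuantizationScale() * weights.GetQuantizationScale();
    return armnn::TensorInfo({ outputChannels }, armnn::DataType::Signed32,
                             biasScale, 0);
}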
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
index ff76a38b08..4ec20d87d7 100644
--- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
@@ -71,7 +71,7 @@ LayerTestResult<T, 2> Abs2dTest(
float qScale = 0.0625f;
int32_t qOffset = 64;
- if (ArmnnType == armnn::DataType::QuantisedSymm16)
+ if (ArmnnType == armnn::DataType::QSymmS16)
{
qScale = 0.1f;
qOffset = 0;
@@ -117,7 +117,7 @@ LayerTestResult<T, 3> Abs3dTest(
float qScale = 0.0625f;
int32_t qOffset = 64;
- if (ArmnnType == armnn::DataType::QuantisedSymm16)
+ if (ArmnnType == armnn::DataType::QSymmS16)
{
qScale = 0.1f;
qOffset = 0;
@@ -218,13 +218,13 @@ Abs2dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-Abs2dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Abs2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Abs2dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Abs2dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -238,13 +238,13 @@ Abs3dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-Abs3dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Abs3dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Abs3dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Abs3dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 2f2d8dbd38..87f42194b9 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -176,7 +176,7 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
float outputScale = 6.0f / 255.0f;
int32_t outputOffset = 0;
- return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 6.0f, 0.0f,
inputScale, inputOffset, outputScale, outputOffset,
input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
@@ -205,7 +205,7 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
int32_t inputOffset = 112;
float inputScale = 0.0125f;
- return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 1.0f, -1.0f,
inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
@@ -377,7 +377,7 @@ LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 4.0f, 3);
}
@@ -385,7 +385,7 @@ LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
+ return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, 0.1f, 0);
}
@@ -508,14 +508,14 @@ LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
+ return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
}
LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -557,7 +557,7 @@ LayerTestResult<int16_t, 4> ReLuInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
@@ -565,7 +565,7 @@ LayerTestResult<uint8_t, 4> ReLuUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 0);
+ return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
}
LayerTestResult<float, 4> ReLuTest(
@@ -616,7 +616,7 @@ LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
@@ -667,14 +667,14 @@ LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SoftReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> SoftReLuInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -724,14 +724,14 @@ LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LeakyReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -780,14 +780,14 @@ LayerTestResult<uint8_t, 4> AbsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AbsTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> AbsInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
LayerTestResult<float, 5> SqrtNNTest(
@@ -892,14 +892,14 @@ LayerTestResult<uint8_t, 4> SqrtUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SqrtTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> SqrtInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -948,14 +948,14 @@ LayerTestResult<uint8_t, 4> SquareUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SquareTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> SquareInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1006,14 +1006,14 @@ LayerTestResult<uint8_t, 4> TanhUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return TanhTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 64);
+ return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}
LayerTestResult<int16_t, 4> TanhInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
@@ -1130,7 +1130,7 @@ LayerTestResult<uint8_t,4> CompareActivationUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f)
{
- return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}
@@ -1140,6 +1140,6 @@ LayerTestResult<int16_t,4> CompareActivationInt16Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f)
{
- return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
+ return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
}
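
[Annotation] The *TestCommon wrappers and explicit instantiations above all hang off one pattern: a template parameterized on the DataType enum, with ResolveType mapping the enum to its storage type at compile time, so a single test body serves float, uint8_t (QAsymmU8) and int16_t (QSymmS16). A self-contained stand-in for that trait (armnn::ResolveType is the real one; this mirrors it under simplified names):

#include <cstdint>
#include <type_traits>

enum class DataType { Float32, QAsymmU8, QSymmS16 };

template<DataType DT> struct ResolveTypeImpl;
template<> struct ResolveTypeImpl<DataType::Float32>  { using Type = float;   };
template<> struct ResolveTypeImpl<DataType::QAsymmU8> { using Type = uint8_t; };
template<> struct ResolveTypeImpl<DataType::QSymmS16> { using Type = int16_t; };

template<DataType DT>
using ResolveType = typename ResolveTypeImpl<DT>::Type;

static_assert(std::is_same<ResolveType<DataType::QAsymmU8>, uint8_t>::value,
              "QAsymmU8 payload is unsigned 8-bit");
static_assert(std::is_same<ResolveType<DataType::QSymmS16>, int16_t>::value,
              "QSymmS16 payload is signed 16-bit");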
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index a3a21ab131..82dc59b66b 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -331,7 +331,7 @@ LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 2.f, 0);
}
@@ -339,7 +339,7 @@ LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
+ return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, 2.f, 0);
}
@@ -355,7 +355,7 @@ LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.1333333f, 128);
}
@@ -363,7 +363,7 @@ LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, 0.1333333f, 0);
}
@@ -392,7 +392,7 @@ LayerTestResult<uint8_t, 4> AdditionUint8Test(
255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
});
- return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -434,7 +434,7 @@ LayerTestResult<int16_t, 4> AdditionInt16Test(
329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
};
- return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 2733100d6c..d63c179dfd 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -264,12 +264,12 @@ ArgMaxSimpleTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxSimpleTest<armnn::DataType::QuantisedSymm16>(
+ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -284,12 +284,12 @@ ArgMinSimpleTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinSimpleTest<armnn::DataType::QuantisedSymm16>(
+ArgMinSimpleTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -304,12 +304,12 @@ ArgMinChannelTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinChannelTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinChannelTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinChannelTest<armnn::DataType::QuantisedSymm16>(
+ArgMinChannelTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -324,12 +324,12 @@ ArgMaxChannelTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxChannelTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxChannelTest<armnn::DataType::QuantisedSymm16>(
+ArgMaxChannelTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -349,7 +349,7 @@ ArgMaxHeightTest<armnn::DataType::Signed32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxHeightTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -364,6 +364,6 @@ ArgMinWidthTest<armnn::DataType::Signed32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinWidthTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinWidthTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index 7857b3531b..7a55146b37 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -432,7 +432,7 @@ LayerTestResult<uint8_t, 4> BatchNormUint8Test(
2.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
inputOutputShape,
@@ -482,7 +482,7 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
inputOutputShape, inputValues, expectedOutputValues,
@@ -524,7 +524,7 @@ LayerTestResult<int16_t, 4> BatchNormInt16Test(
2.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+ return BatchNormTestImpl<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
inputOutputShape,
@@ -574,7 +574,7 @@ LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+ return BatchNormTestImpl<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
inputOutputShape,
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index c8272f47f0..7327536dff 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -408,7 +408,7 @@ LayerTestResult<uint8_t, 4> EqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
@@ -419,7 +419,7 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
@@ -430,7 +430,7 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
@@ -507,7 +507,7 @@ LayerTestResult<uint8_t, 4> GreaterSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
@@ -518,7 +518,7 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
@@ -529,7 +529,7 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
@@ -607,7 +607,7 @@ LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
@@ -618,7 +618,7 @@ LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
@@ -629,7 +629,7 @@ LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
@@ -706,7 +706,7 @@ LayerTestResult<uint8_t, 4> LessSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
@@ -717,7 +717,7 @@ LayerTestResult<uint8_t, 4> LessBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
@@ -728,7 +728,7 @@ LayerTestResult<uint8_t, 4> LessBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
@@ -806,7 +806,7 @@ LayerTestResult<uint8_t, 4> LessOrEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
@@ -817,7 +817,7 @@ LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
@@ -828,7 +828,7 @@ LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
@@ -906,7 +906,7 @@ LayerTestResult<uint8_t, 4> NotEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
@@ -917,7 +917,7 @@ LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
@@ -928,7 +928,7 @@ LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 57ed7542d2..29cd5ac560 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -2054,14 +2054,14 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
// Explicit template specializations
//
-template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
-ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
+template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor);
-template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
-ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
+template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor);
@@ -2362,9 +2362,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
unsigned int inputChannels2 = 1;
// Defines the tensor descriptors.
- TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
// Quantized input1 tensor. Range [-3, 1]
const float inputScale1 = 0.015686f;
@@ -2507,9 +2507,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
unsigned int inputChannels2 = 1;
// Defines the tensor descriptors.
- TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
// Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
const float scale = 0.13497836f;
@@ -2645,9 +2645,9 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
unsigned int inputChannels2 = 1;
// Defines the tensor descriptors.
- TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
- TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
- TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
+ TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
+ TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
+ TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
// Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
const float scale = 0.13497836f;
@@ -2765,28 +2765,28 @@ LayerTestResult<uint8_t, 1> Concat1dUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
@@ -2794,7 +2794,7 @@ LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
@@ -2802,14 +2802,14 @@ LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
@@ -2817,7 +2817,7 @@ LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
+ return Concat3dDim2TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
@@ -2825,14 +2825,14 @@ LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
@@ -2841,7 +2841,7 @@ LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
@@ -2849,28 +2849,28 @@ LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
- return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDim3TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
@@ -2878,7 +2878,7 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
@@ -2886,7 +2886,7 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
@@ -2894,7 +2894,7 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
@@ -2903,6 +2903,6 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index cfb62637d1..35868405f1 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -134,26 +134,26 @@ LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 2e-6f, 1);
}
LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 2e-6f, 1);
}
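Aside: a minimal usage sketch (illustrative only, not part of this change) of how the renamed enumerators appear at a call site; it assumes armnn::TensorInfo's quantization constructor taking shape, data type, scale, and offset:

    // The new names encode signedness and width explicitly:
    //   QAsymmU8 = asymmetric, unsigned, 8-bit payload (uint8_t)
    //   QSymmS16 = symmetric,  signed,  16-bit payload (int16_t)
    armnn::TensorInfo u8Info ({1, 4}, armnn::DataType::QAsymmU8, 0.5f, -1); // scale, offset (as in the tests above)
    armnn::TensorInfo s16Info({1, 4}, armnn::DataType::QSymmS16, 1.0f,  0); // symmetric types keep offset 0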
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index a00fda7679..055c9ab6e8 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1018,13 +1018,13 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
int32_t qOffset;
switch (ArmnnType)
{
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
qScale = 0.1f;
qOffset = 128;
break;
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
qScale = 0.1f;
qOffset = 0;
@@ -2304,13 +2304,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
int32_t qOffset;
switch (ArmnnType)
{
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
qScale = 0.1f;
qOffset = 128;
break;
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
qScale = 0.1f;
qOffset = 0;
@@ -2800,15 +2800,15 @@ Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
@@ -2821,15 +2821,15 @@ Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Flo
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
@@ -2842,15 +2842,15 @@ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, arm
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool biasEnabled,
@@ -2863,15 +2863,15 @@ DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataTy
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
@@ -2884,15 +2884,15 @@ DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::Data
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
@@ -2932,7 +2932,7 @@ LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -2981,7 +2981,7 @@ LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -2991,7 +2991,7 @@ LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -3001,7 +3001,7 @@ LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -3038,7 +3038,7 @@ LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}
@@ -3049,7 +3049,7 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
{
using namespace armnn;
- const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType inputType = DataType::QAsymmU8;
const DataType kernelType = DataType::QuantizedSymm8PerAxis;
const DataType biasType = DataType::Signed32;
@@ -3220,7 +3220,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -3230,7 +3230,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -3252,7 +3252,7 @@ LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -3262,7 +3262,7 @@ LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -3273,7 +3273,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
{
using namespace armnn;
- const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType inputType = DataType::QAsymmU8;
const DataType kernelType = DataType::QuantizedSymm8PerAxis;
const DataType biasType = DataType::Signed32;
@@ -3390,6 +3390,6 @@ LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
const armnn::DataLayout layout)
{
- return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, layout);
}
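Aside: the dilation tests above all pick quantization parameters from the (renamed) template data type; condensed, the pattern reads as follows (a sketch, assuming ArmnnType is the armnn::DataType template parameter):

    float   qScale  = 1.0f;
    int32_t qOffset = 0;
    switch (ArmnnType)
    {
        case armnn::DataType::QAsymmU8: qScale = 0.1f; qOffset = 128; break; // unsigned: centre the range
        case armnn::DataType::QSymmS16: qScale = 0.1f; qOffset = 0;   break; // symmetric: zero point fixed at 0
        default: break;                                                      // float paths ignore both
    }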
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index a4db5686b6..eef8372add 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -314,54 +314,54 @@ LayerTestResult<uint8_t, 4> Debug4dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Debug3dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Debug2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Debug1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> Debug4dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> Debug3dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> Debug2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 1> Debug1dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index 5e5cba349e..96fa24a0cb 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -284,51 +284,51 @@ DepthToSpaceTest4<armnn::DataType::Float16>(
armnn::DataLayout dataLayout);
// QuantisedAsymm8
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest1<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest1<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest2<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest2<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest3<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest3<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest4<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest4<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
// QuantisedSymm16
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest1<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest1<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest2<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest2<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest3<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest3<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest4<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest4<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
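Aside: the explicit instantiations above rely on armnn::ResolveType to map an enum value back to its payload type. As a compile-time sketch (assuming <type_traits> and ResolveType.hpp are included), the renamed values resolve to:

    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QAsymmU8>, uint8_t>::value,
                  "QAsymmU8 carries an unsigned 8-bit payload");
    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QSymmS16>, int16_t>::value,
                  "QSymmS16 carries a signed 16-bit payload");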
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index fb225aeb54..7a757d59ee 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -134,14 +134,14 @@ LayerTestResult<float, 4> DequantizeSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return DequantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> DequantizeOffsetUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return DequantizeOffsetTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> DequantizeSimpleInt8Test(
@@ -155,14 +155,14 @@ LayerTestResult<float, 4> DequantizeSimpleInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return DequantizeSimpleTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float16>(workloadFactory,
+ return DequantizeSimpleTest<armnn::DataType::QAsymmU8, armnn::DataType::Float16>(workloadFactory,
memoryManager);
}
@@ -177,6 +177,6 @@ LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Float16>(workloadFactory,
+ return DequantizeSimpleTest<armnn::DataType::QSymmS16, armnn::DataType::Float16>(workloadFactory,
memoryManager);
}
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
index b908f96e9f..223beb49e8 100644
--- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
@@ -288,7 +288,7 @@ LayerTestResult<uint8_t, 4> DivisionUint8Test(
4, 4, 4, 4, 5, 5, 5, 5
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape,
@@ -314,7 +314,7 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
std::vector<uint8_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -348,7 +348,7 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
13, 14, 15, 16, 17, 18
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -383,7 +383,7 @@ LayerTestResult<int16_t,4> DivisionInt16Test(
4, 4, 4, 4, 5, 5, 5, 5
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
@@ -409,7 +409,7 @@ LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
std::vector<int16_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
@@ -443,7 +443,7 @@ LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
13, 14, 15, 16, 17, 18
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index a3d29dac71..ebad7fc91c 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -71,7 +71,7 @@ SimpleFloorTest<armnn::DataType::Float16>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleFloorTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleFloorTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index a3fe8582c8..34bd9ec75e 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -229,14 +229,14 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
// Explicit template specializations
//
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-FullyConnectedTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+FullyConnectedTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-FullyConnectedTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+FullyConnectedTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled);
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index 1ccf51c7d2..c6f58057c5 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -315,14 +315,14 @@ LayerTestResult<uint8_t, 1> Gather1dParamsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+ return GatherTestHelper<armnn::DataType::QAsymmU8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 1> Gather1dParamsInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedSymm16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+ return GatherTestHelper<armnn::DataType::QSymmS16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test(
@@ -343,7 +343,7 @@ LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsTestImpl(
+ return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsTestImpl(
workloadFactory, memoryManager);
}
@@ -351,7 +351,7 @@ LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsTestImpl(
+ return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsTestImpl(
workloadFactory, memoryManager);
}
@@ -375,7 +375,7 @@ LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
+ return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
workloadFactory, memoryManager);
}
@@ -383,6 +383,6 @@ LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
+ return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
workloadFactory, memoryManager);
}
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 4b16921990..e500a126f6 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -599,7 +599,7 @@ LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
@@ -614,7 +614,7 @@ LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
@@ -644,7 +644,7 @@ LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
@@ -659,7 +659,7 @@ LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
@@ -748,7 +748,7 @@ LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
@@ -763,7 +763,7 @@ LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
@@ -793,7 +793,7 @@ LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
@@ -808,7 +808,7 @@ LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index e755aa54cb..c61a0526a1 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -1573,17 +1573,17 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
// Input/Output tensor info
armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
@@ -1635,12 +1635,12 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
// Weights and bias tensor and quantization info
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
@@ -1965,8 +1965,8 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
const float qScale = 1.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
- const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType datatype = armnn::DataType::QSymmS16;
+ const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({2, 2}, datatype);
boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
@@ -1995,8 +1995,8 @@ LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
const float qScale = 1.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
- const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType datatype = armnn::DataType::QSymmS16;
+ const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
boost::multi_array<int16_t, 2> input =
@@ -2026,8 +2026,8 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
const float qScale = 2.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
- const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType datatype = armnn::DataType::QSymmS16;
+ const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
boost::multi_array<int16_t, 2> input =
@@ -2068,7 +2068,7 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const
const float qScale = 1.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
+ const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymm16
armnn::TensorInfo inputDesc({2, 2}, datatype);
boost::multi_array<int16_t , 2> input =
@@ -2098,11 +2098,11 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
{166, 179, 50, 150}));
- armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
{140, 151, 146, 112, 136, 156, 142, 112 }));
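Aside: the QuantizedLstm hunks above mix both renamed types in one workload; a condensed sketch (names as in the test, not a full listing):

    // Input/output activations are 8-bit asymmetric; the cell state uses the
    // 16-bit symmetric type for extra precision, with its offset 0 by construction.
    armnn::TensorInfo inputInfo    ({numBatches, inputSize},  armnn::DataType::QAsymmU8, inputOutputScale, inputOutputOffset);
    armnn::TensorInfo cellStateInfo({numBatches, outputSize}, armnn::DataType::QSymmS16, cellStateScale,   cellStateOffset);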
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index 0218697038..5147cffddb 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -238,7 +238,7 @@ LayerTestResult<uint8_t, 4> MaximumUint8Test(
4, 4, 4, 4, 5, 5, 5, 5
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape,
@@ -270,7 +270,7 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
7, 8, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -301,7 +301,7 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
7, 10, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -327,7 +327,7 @@ LayerTestResult<int16_t, 4> MaximumInt16Test(
std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
@@ -359,7 +359,7 @@ LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
7, 8, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
@@ -391,7 +391,7 @@ LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
7, 10, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
index ed12c7fa2c..a0a4029115 100644
--- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
@@ -86,7 +86,7 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
1, 1, 2, 1, 2, 3
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -226,7 +226,7 @@ LayerTestResult<int16_t, 4> MinimumInt16Test(
3, 3, 3, 3, 4, 4, 4, 4
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
@@ -258,7 +258,7 @@ LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2, 2, 2, 2, 2, 2
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
@@ -290,7 +290,7 @@ LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
1, 8, 3, 1, 10, 3
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index a39e6bd827..d32e0cf89b 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -216,7 +216,7 @@ LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
};
// Scale/offset chosen to have output values out of range
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape,
@@ -254,7 +254,7 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
14, 16, 18, 20, 22, 24
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -286,7 +286,7 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7, 16, 27, 10, 22, 36
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -321,7 +321,7 @@ LayerTestResult<int16_t, 4> MultiplicationInt16Test(
84, 104, 126, 150, 176, 204
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
@@ -353,7 +353,7 @@ LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
14, 16, 18, 20, 22, 24
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
@@ -385,7 +385,7 @@ LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7, 16, 27, 10, 22, 36
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 3a8d2b7bbf..9239c665eb 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -416,23 +416,23 @@ LayerTestResult<T, 4> Pad4dTestCommon(
// Explicit template specializations
//
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Pad2dTestCommon<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset,
const float customPaddingValue);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Pad3dTestCommon<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Pad4dTestCommon<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
@@ -446,28 +446,28 @@ LayerTestResult<uint8_t, 2> PadUint82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
+ return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
}
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> PadUint84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> PadFloat322dTest(
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index 160e6582d5..b58e9826b8 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -1366,7 +1366,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}
@@ -1375,7 +1375,7 @@ LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, forceNoPadding);
}
@@ -1393,7 +1393,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}
@@ -1402,7 +1402,7 @@ LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, forceNoPadding);
}
@@ -1419,7 +1419,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
@@ -1427,7 +1427,7 @@ LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -1440,7 +1440,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 1.0f, -5);
}
@@ -1448,7 +1448,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
@@ -1463,7 +1463,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 1.0f, -5);
}
@@ -1471,7 +1471,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
@@ -1488,7 +1488,7 @@ LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, dataLayout, 0.5, -1);
}
@@ -1497,7 +1497,7 @@ LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, dataLayout);
}
@@ -1521,7 +1521,7 @@ LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5, -1);
}
@@ -1529,7 +1529,7 @@ LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
@@ -1543,7 +1543,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager);
}
@@ -1551,7 +1551,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
@@ -1567,7 +1567,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Tes
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager);
}
@@ -1575,7 +1575,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Tes
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
@@ -1590,7 +1590,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager);
}
@@ -1598,7 +1598,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
@@ -1615,7 +1615,7 @@ LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
@@ -1623,7 +1623,7 @@ LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
@@ -1637,14 +1637,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
@@ -1658,14 +1658,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
armnn::IWorkloadFactory& workloadFactory,
@@ -1678,14 +1678,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize7Test(
@@ -1699,14 +1699,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize9Test(
@@ -1720,14 +1720,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -1740,14 +1740,14 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
@@ -1761,14 +1761,14 @@ LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
@@ -1782,14 +1782,14 @@ LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> ComparePooling2dTest(
@@ -1808,7 +1808,7 @@ LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}
@@ -1818,6 +1818,6 @@ LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
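Note on the renamed values before the quantize tests below: throughout these hunks, QAsymmU8 pairs with LayerTestResult<uint8_t, ...> and QSymmS16 with LayerTestResult<int16_t, ...>, so the U/S suffix now states the payload signedness that the old QuantisedAsymm8/QuantisedSymm16 names left implicit. A minimal sketch (not ArmNN source), assuming the affine scheme implied by the (scale, offset) arguments visible above (e.g. 0.5, -1 and 0.1f, 128), i.e. real = scale * (q - offset); the helper names are illustrative.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Quantize a real value to an unsigned asymmetric 8-bit payload (QAsymmU8-style).
uint8_t QuantizeQAsymmU8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::clamp(q, 0, 255)); // clamp to the unsigned 8-bit range
}

// Quantize a real value to a signed symmetric 16-bit payload (QSymmS16-style; offset is 0).
int16_t QuantizeQSymmS16(float value, float scale)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale));
    return static_cast<int16_t>(std::clamp(q, -32768, 32767)); // clamp to the signed 16-bit range
}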
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index e23f92a5a9..ab6a35b16f 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -130,14 +130,14 @@ LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
@@ -151,5 +151,5 @@ LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
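The ReshapeTestImpl hunks that follow update explicit instantiations written in terms of armnn::ResolveType<DT>, the compile-time map from a DataType enum value to its C++ payload type. A minimal sketch (not ArmNN source) of that pattern, assuming only the enum values shown in this diff; the struct and alias names are illustrative.

#include <cstdint>

enum class DataType { Float32, QAsymmU8, QSymmS16 };

template <DataType DT> struct ResolveTypeImpl;
template <> struct ResolveTypeImpl<DataType::Float32>  { using Type = float;   };
template <> struct ResolveTypeImpl<DataType::QAsymmU8> { using Type = uint8_t; }; // unsigned asymmetric 8-bit
template <> struct ResolveTypeImpl<DataType::QSymmS16> { using Type = int16_t; }; // signed symmetric 16-bit

template <DataType DT>
using ResolveType = typename ResolveTypeImpl<DT>::Type;

// The payload type follows the enum value at compile time, so renaming the
// enum value is the only change each instantiation site needs.
static_assert(sizeof(ResolveType<DataType::QAsymmU8>) == 1, "QAsymmU8 resolves to uint8_t");
static_assert(sizeof(ResolveType<DataType::QSymmS16>) == 2, "QSymmS16 resolves to int16_t");

int main() { return 0; }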
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index 485e7eab80..894ece65a5 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -176,13 +176,13 @@ SimpleReshapeTest<armnn::DataType::Float32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleReshapeTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleReshapeTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleReshapeTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -191,12 +191,12 @@ Reshape5dTest<armnn::DataType::Float32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 5>
-Reshape5dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 5>
+Reshape5dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 5>
-Reshape5dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 5>
+Reshape5dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index 080155eebf..0389e82c7a 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -664,62 +664,62 @@ ResizeNearestNeighborMagTest<armnn::DataType::Float16>(
int32_t outQuantOffset);
// QAsymm8
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout,
@@ -729,62 +729,62 @@ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
int32_t outQuantOffset);
// QSymm16
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearNopTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearNopTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleResizeBilinearTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearSqMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearMagTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearMagTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout,
diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
index 24a3b21e96..4107e13eb8 100644
--- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
@@ -227,13 +227,13 @@ Rsqrt2dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-Rsqrt2dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Rsqrt2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Rsqrt2dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Rsqrt2dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -247,13 +247,13 @@ Rsqrt3dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-Rsqrt3dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Rsqrt3dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Rsqrt3dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Rsqrt3dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index 65b17164f3..fc78074a43 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -246,48 +246,48 @@ LayerTestResult<float, 1> Slice1dFloat32Test(armnn::IWorkloadFactory& workloadFa
LayerTestResult<uint8_t, 4> Slice4dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Slice3dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Slice2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Slice1dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
// Int16 tests
LayerTestResult<int16_t, 4> Slice4dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> Slice3dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> Slice2dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 1> Slice1dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 2a1aa76fce..4147cc8516 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -602,7 +602,7 @@ LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta);
}
LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
@@ -611,7 +611,7 @@ LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
float beta)
{
Simple3dSoftmaxOutputData data;
- return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
beta,
@@ -627,7 +627,7 @@ LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
{
Simple4dSoftmaxData data;
- return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
+ return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta,
data.inputShape, data.outputData, data.inputData);
}
@@ -664,7 +664,7 @@ LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta);
}
LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
@@ -673,7 +673,7 @@ LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
float beta)
{
Simple3dSoftmaxOutputData data;
- return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
+ return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
data.inputShape, data.outputData, data.inputData);
}
@@ -684,7 +684,7 @@ LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
{
Simple4dSoftmaxData data;
- return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
+ return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
data.inputShape, data.outputData, data.inputData);
}
@@ -704,6 +704,6 @@ LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, beta);
}
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 2793875c5b..afb4796703 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -337,28 +337,28 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdSimpleNhwcFloat32Test(
@@ -421,82 +421,82 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index b6bf530da3..59e1481ad1 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -159,7 +159,7 @@ LayerTestResult<uint8_t, 4> SpaceToDepthNhwcAsymmQ8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
+ return SpaceToDepthSimpleTest1<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager);
}
@@ -168,7 +168,7 @@ LayerTestResult<uint8_t, 4> SpaceToDepthNchwAsymmQ8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
+ return SpaceToDepthSimpleTest1<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
armnn::DataLayout::NCHW);
@@ -235,7 +235,7 @@ LayerTestResult<int16_t, 4> SpaceToDepthNhwcQSymm16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
+ return SpaceToDepthSimpleTest2<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager);
}
@@ -244,7 +244,7 @@ LayerTestResult<int16_t, 4> SpaceToDepthNchwQSymm16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
+ return SpaceToDepthSimpleTest2<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
armnn::DataLayout::NCHW);
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index c8c2f9c7d1..ef81a1dd1d 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -341,14 +341,14 @@ std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
@@ -369,12 +369,12 @@ LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index 23f5df0df9..c6c330e875 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -959,42 +959,42 @@ LayerTestResult<uint8_t, 4> StridedSlice4dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4dReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice4dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0Dim3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0Dim3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1002,7 +1002,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1010,7 +1010,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition1Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition1Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1018,7 +1018,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition2Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition2Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1026,7 +1026,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1034,7 +1034,7 @@ LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And1Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And1Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1042,7 +1042,7 @@ LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And2Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And2Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1050,7 +1050,7 @@ LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1058,7 +1058,7 @@ LayerTestResult<uint8_t, 1> StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8T
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And1And3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And1And3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
@@ -1066,89 +1066,89 @@ LayerTestResult<uint8_t, 3> StridedSlice3dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3dReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice3dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2dReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice2dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSlice4dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSlice4dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice4dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> StridedSlice3dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> StridedSlice3dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice3dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> StridedSlice2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> StridedSlice2dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice2dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
index d180021639..525fb46d56 100644
--- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
@@ -27,7 +27,7 @@ LayerTestResult<uint8_t, 4> SubtractionUint8Test(
std::vector<uint8_t> input1 = { 1, 2, 1, 2 };
std::vector<uint8_t> output = { 3, 3, 5, 5 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -57,7 +57,7 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
std::vector<uint8_t> output = { 5, 6, 7, 8 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -87,7 +87,7 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
std::vector<uint8_t> output = { 8, 11, 12, 15 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
@@ -254,7 +254,7 @@ LayerTestResult<int16_t, 4> SubtractionInt16Test(
std::vector<int16_t> input1 = { 1, 2, 1, 2 };
std::vector<int16_t> output = { 3, 3, 5, 5 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
@@ -284,7 +284,7 @@ LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
std::vector<int16_t> output = { 3, 4, 5, 6 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
@@ -314,7 +314,7 @@ LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
std::vector<int16_t> output = { 8, 11, 12, 15 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 4b4894f4d2..3ac25f0534 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -566,7 +566,7 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
{
using namespace armnn;
- const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType inputType = DataType::QAsymmU8;
const DataType kernelType = DataType::QuantizedSymm8PerAxis;
const DataType biasType = DataType::Signed32;
@@ -672,15 +672,15 @@ SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Floa
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
@@ -693,15 +693,15 @@ PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Floa
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
@@ -714,15 +714,15 @@ StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Flo
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-StridedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-StridedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
@@ -734,14 +734,14 @@ MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
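Note: the explicit instantiations above pair each renamed enum value with the payload type it resolves to, which is the ambiguity this commit removes: QAsymmU8 is visibly unsigned 8-bit and QSymmS16 signed 16-bit, while the per-axis kernel type QuantizedSymm8PerAxis keeps its old spelling in this file. A compile-time sketch of the mapping (header paths are assumptions):

    #include <cstdint>
    #include <type_traits>
    #include <armnn/Types.hpp>   // armnn::DataType
    #include "ResolveType.hpp"   // armnn::ResolveType (src/armnn)

    // Sketch: ResolveType turns the enum value into the C++ payload type,
    // matching the LayerTestResult element types used above.
    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QAsymmU8>, uint8_t>::value,
                  "QAsymmU8 carries an unsigned 8-bit payload");
    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QSymmS16>, int16_t>::value,
                  "QSymmS16 carries a signed 16-bit payload");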
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index bce91ab462..f7129d6035 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -195,7 +195,7 @@ bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
const ArgMinMaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (input.GetDataType() == DataType::QuantisedAsymm8)
+ if (input.GetDataType() == DataType::QAsymmU8)
{
return false;
}
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 3c80ea9d49..d79745c420 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -127,7 +127,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
@@ -365,7 +365,7 @@ BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
{
- ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QAsymmU8>();
}
template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
@@ -522,7 +522,7 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
- ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
+ ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}
template <typename armnn::DataType DataType>
@@ -555,7 +555,7 @@ BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
- ClCreateReshapeWorkloadTest<armnn::DataType::QuantisedAsymm8>();
+ ClCreateReshapeWorkloadTest<armnn::DataType::QAsymmU8>();
}
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
@@ -834,7 +834,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
{
- ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
@@ -849,7 +849,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
{
- ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
template <typename MeanWorkloadType, typename armnn::DataType DataType>
@@ -883,7 +883,7 @@ BOOST_AUTO_TEST_CASE(CreateMeanFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
{
- ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QuantisedAsymm8>();
+ ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QAsymmU8>();
}
template <typename ConcatWorkloadType, armnn::DataType DataType>
@@ -923,17 +923,17 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
template <typename SpaceToDepthWorkloadType, typename armnn::DataType DataType>
@@ -965,12 +965,12 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
{
- ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
{
- ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
template <armnn::DataType DataType>
@@ -1013,7 +1013,7 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- ClCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ ClCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
template <typename QuantizedLstmWorkloadType>
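Note: every CL case in this file changes only the enum spelling handed to a templated helper; the element type is still derived from the template parameter, so no fixture data changes. A minimal sketch of the pattern with hypothetical names (the real helpers assert much more about the created workload):

    template <armnn::DataType DataType>
    void CreateWorkloadSketch()
    {
        using T = armnn::ResolveType<DataType>; // e.g. uint8_t for QAsymmU8
        static_assert(sizeof(T) > 0, "payload type resolved at compile time");

        // Tensors are tagged with the enum value; the workload must honour it.
        armnn::TensorInfo inputInfo({ 2, 3 }, DataType);
        armnn::TensorInfo outputInfo({ 2, 3 }, DataType);

        // ... build the layer, create the workload via the factory, then
        // check the queue descriptor reports DataType (assertions omitted).
    }

    // Usage mirrors the tests above, e.g. CreateWorkloadSketch<armnn::DataType::QAsymmU8>();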
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 5d7a2f5666..260f8f68cd 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -43,7 +43,7 @@ BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test)
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
{
- ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
{
- ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
@@ -63,7 +63,7 @@ BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
{
- ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// DepthToSpace
@@ -79,12 +79,12 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
@@ -99,23 +99,23 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
@@ -133,7 +133,7 @@ BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
@@ -153,7 +153,7 @@ BOOST_AUTO_TEST_CASE(ClGreaterBroadcastEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
@@ -186,7 +186,7 @@ BOOST_AUTO_TEST_CASE(ClPreluEndToEndFloat32Test)
BOOST_AUTO_TEST_CASE(ClPreluEndToEndTestUint8)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest1)
@@ -216,7 +216,7 @@ BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndUint8Test)
{
- Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndTest)
@@ -231,12 +231,12 @@ BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndUint8Test)
{
- Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndUint8Test)
{
- Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndTest)
@@ -256,17 +256,17 @@ BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndUint8Test)
{
- Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndUint8Test)
{
- Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndUint8Test)
{
- Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndTest)
@@ -291,22 +291,22 @@ BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndUint8Test)
{
- Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndUint8Test)
{
- Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndUint8Test)
{
- Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndUint8Test)
{
- Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// TransposeConvolution2d
@@ -318,7 +318,7 @@ BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNchwTest)
BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
@@ -330,7 +330,7 @@ BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
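Note: the two Dequantize cases above differ only in the zero point: real = scale * (q - offset), so the Offset variant is meaningful for QAsymmU8, whereas QSymmS16 always has a zero point of 0. A worked sketch of the arithmetic these end-to-end tests rely on (helper name is illustrative):

    #include <cstdint>

    // Sketch: dequantize one QAsymmU8 value.
    float DequantizeU8(uint8_t q, float scale, int32_t offset)
    {
        return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
    }

    // With scale = 0.5f, offset = 128:
    //   DequantizeU8(130, 0.5f, 128) == 1.0f
    //   DequantizeU8(128, 0.5f, 128) == 0.0f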
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index bcf057b1fb..35e1172161 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -40,7 +40,7 @@ BOOST_FIXTURE_TEST_CASE(IsLayerSupportedUint8Cl, ClContextControlFixture)
{
armnn::ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+ IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture)
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index c821dd94c3..76e5174cb7 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -55,20 +55,20 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataTyp
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
@@ -104,11 +104,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
@@ -120,11 +120,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
@@ -136,11 +136,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
@@ -199,7 +199,7 @@ ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, false)
+ ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
// Normalization
ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
@@ -388,15 +388,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
@@ -408,22 +408,22 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
// Pad
@@ -437,24 +437,24 @@ ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QAsymmU8>)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
// Lstm
ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
@@ -493,13 +493,13 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
// Minimum
ARMNN_AUTO_TEST_CASE(MinimumBroadcast1Element1, MinimumBroadcast1ElementTest1)
@@ -639,25 +639,25 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
// Resize Bilinear - NHWC
@@ -665,25 +665,25 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
// Resize NearestNeighbor - NCHW
@@ -691,31 +691,31 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
// Resize NearestNeighbor - NHWC
@@ -723,31 +723,31 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
// Rsqrt
@@ -777,11 +777,11 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -794,11 +794,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -811,11 +811,11 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -828,11 +828,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -845,11 +845,11 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -862,11 +862,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -877,10 +877,10 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NHWC)
// Abs
diff --git a/src/backends/cl/workloads/ClGreaterWorkload.cpp b/src/backends/cl/workloads/ClGreaterWorkload.cpp
index 435ead853f..b086122bdc 100644
--- a/src/backends/cl/workloads/ClGreaterWorkload.cpp
+++ b/src/backends/cl/workloads/ClGreaterWorkload.cpp
@@ -60,6 +60,6 @@ void ClGreaterWorkload<T>::Execute() const
}
template class ClGreaterWorkload<DataType::Float32>;
-template class ClGreaterWorkload<DataType::QuantisedAsymm8>;
+template class ClGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClGreaterWorkload.hpp b/src/backends/cl/workloads/ClGreaterWorkload.hpp
index 69d017e6c6..84a24fffa3 100644
--- a/src/backends/cl/workloads/ClGreaterWorkload.hpp
+++ b/src/backends/cl/workloads/ClGreaterWorkload.hpp
@@ -29,6 +29,6 @@ private:
};
using ClGreaterFloat32Workload = ClGreaterWorkload<DataType::Float32>;
-using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QuantisedAsymm8>;
+using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
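Note: only the enum spelling changes here; the public alias keeps its Uint8 name, so downstream code naming ClGreaterUint8Workload compiles unchanged. A one-line sanity check of that invariant (sketch; could sit in any translation unit that includes this header):

    #include <type_traits>

    static_assert(std::is_same<armnn::ClGreaterUint8Workload,
                               armnn::ClGreaterWorkload<armnn::DataType::QAsymmU8>>::value,
                  "the Uint8 alias must track the renamed enum value");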
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index ecaf2799e8..c5cfcd8fc1 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -98,7 +98,7 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
case DataType::Float32:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
break;
case DataType::QuantizedSymm8PerAxis:
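Note: a switch dispatches on the enum's numeric value, so this rename is purely textual at the call site. If the commit also retains the deprecated spellings for source compatibility (an assumption, not visible in this hunk), they must alias the new enumerators rather than add cases, since two case labels with the same value would not compile. Sketch of such a shim (ordering and values illustrative):

    enum class DataType
    {
        Float16,
        Float32,
        QAsymmU8,
        Signed32,
        Boolean,
        QSymmS16,
        QuantizedSymm8PerAxis,

        // Deprecated spellings stay usable but share the new values,
        // so `case DataType::QAsymmU8:` covers both names.
        QuantisedAsymm8 = QAsymmU8,
        QuantisedSymm16 = QSymmS16,
    };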
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 6a4f612881..a08c8f7d2a 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -152,7 +152,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
- DataType::QuantisedAsymm8>();
+ DataType::QAsymmU8>();
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -178,7 +178,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- DataType::QuantisedAsymm8>();
+ DataType::QAsymmU8>();
}
template <typename WorkloadType,
@@ -445,12 +445,12 @@ BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
{
- NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
- NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
}
static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
@@ -493,7 +493,7 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
- NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
+ NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}
template <typename armnn::DataType DataType>
@@ -527,7 +527,7 @@ BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
- NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
+ NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
}
template <typename ResizeWorkloadType, armnn::DataType DataType>
@@ -563,7 +563,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
{
- NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
@@ -573,7 +573,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
{
- NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
@@ -634,12 +634,12 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
{
- NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+ NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
{
- NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+ NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
@@ -859,17 +859,17 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
template <armnn::DataType DataType>
@@ -914,7 +914,7 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- NeonCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
template <typename QuantizedLstmWorkloadType>
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 2f4c847971..e1c929b17b 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
@@ -112,7 +112,7 @@ BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
@@ -124,7 +124,7 @@ BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
{
- ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
@@ -134,7 +134,7 @@ BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
{
- ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
@@ -144,7 +144,7 @@ BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
{
- ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// DepthToSpace
@@ -160,12 +160,12 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
@@ -180,23 +180,23 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
@@ -206,7 +206,7 @@ BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
@@ -236,7 +236,7 @@ BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
{
- Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
@@ -251,12 +251,12 @@ BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
{
- Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
{
- Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
@@ -276,17 +276,17 @@ BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
{
- Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
{
- Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
{
- Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
@@ -311,22 +311,22 @@ BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
{
- Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
{
- Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
{
- Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
{
- Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
@@ -342,7 +342,7 @@ BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNchwTest)
BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
@@ -354,7 +354,7 @@ BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
@@ -457,52 +457,52 @@ BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
{
- ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
{
- ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
{
- ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
{
- ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
{
- ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
{
- ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
{
- ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
{
- ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
{
- ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
{
- ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest)
@@ -586,7 +586,7 @@ BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
@@ -664,7 +664,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
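Note: QuantizeData converts the float fixtures to QAsymmU8 payloads before the NMS end-to-end checks run; the essential arithmetic is q = clamp(round(x / scale) + offset, 0, 255). A self-contained sketch (body is illustrative; the real utility's rounding mode is not verified here):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: quantize one float to a QAsymmU8 payload.
    uint8_t QuantizeU8(float x, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

    // With scale = 0.1f, offset = 0: QuantizeU8(0.5f, 0.1f, 0) == 5.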
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 435afd23a6..57e69ec591 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Neon)
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
- IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+ IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d645168456..cde1435045 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -58,11 +58,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
@@ -74,11 +74,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
@@ -93,12 +93,12 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
- <DataType::QuantisedAsymm8, DataType::Signed32>,
+ <DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
- <DataType::QuantisedAsymm8, DataType::Signed32>,
+ <DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
@@ -124,15 +124,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
@@ -144,15 +144,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// Depthwise Convolution
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
@@ -445,7 +445,7 @@ ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, false)
+ ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
@@ -453,8 +453,8 @@ ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, tr
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
// Add
ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
@@ -566,7 +566,7 @@ ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<armnn::DataType::Float32>)
// Pad
@@ -580,20 +580,20 @@ ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
// Lstm
ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
@@ -616,13 +616,13 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
@@ -650,19 +650,19 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<DataType::Float32>
ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<DataType::Float32>, DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NCHW)
// Resize Bilinear - NHWC data layout
@@ -683,19 +683,19 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NHWC)
// Resize NearestNeighbor - NCHW
@@ -716,19 +716,19 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
// Resize NearestNeighbor - NHWC
@@ -749,19 +749,19 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
// Slice
@@ -834,7 +834,7 @@ ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QAsymmU8>)
// Stack
ARMNN_AUTO_TEST_CASE(Stack0Axis, StackAxis0Float32Test)
@@ -854,11 +854,11 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -871,11 +871,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -888,11 +888,11 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -905,11 +905,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -922,11 +922,11 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -939,11 +939,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -954,10 +954,10 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NHWC)
// Abs
@@ -980,12 +980,12 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
#if defined(ARMNNREF_ENABLED)
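Every registration above only re-points a template argument; the templated test bodies themselves are unchanged. A minimal sketch of the idiom they rely on (hypothetical test shown under assumed names; ResolveType is armnn's trait mapping an enumerator to its payload type):

template <armnn::DataType DT>
void TypedSmokeTest()
{
    using T = armnn::ResolveType<DT>;       // uint8_t for QAsymmU8, int16_t for QSymmS16
    armnn::TensorInfo info({ 1, 4 }, DT);   // the enumerator travels with the tensor metadata
    std::vector<T> payload(info.GetNumElements());
    // ... build the layer under test and run it over 'payload' ...
}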
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 56e5552dd3..338c7eb1f6 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -72,7 +72,7 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
m_FullyConnectedLayer.reset(layer.release());
// Allocate
- if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
+ if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
{
InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.cpp b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
index 62396261e1..6380dfada5 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
@@ -44,6 +44,6 @@ void NeonGreaterWorkload<T>::Execute() const
}
template class NeonGreaterWorkload<DataType::Float32>;
-template class NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+template class NeonGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.hpp b/src/backends/neon/workloads/NeonGreaterWorkload.hpp
index df1e07e07b..bcab27e7a6 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.hpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.hpp
@@ -31,6 +31,6 @@ private:
};
using NeonGreaterFloat32Workload = NeonGreaterWorkload<DataType::Float32>;
-using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
\ No newline at end of file
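The two NeonGreaterWorkload hunks have to move in lock-step: the alias in the header and the explicit instantiation in the .cpp must name the same specialisation, otherwise the alias would refer to a specialisation whose definition is never emitted. A stripped-down illustration of the idiom (hypothetical names, same mechanics):

// Workload.hpp: declaration plus alias; member definitions live in the .cpp.
template <armnn::DataType DT>
class TypedWorkload
{
public:
    void Execute() const;
};
using TypedUint8Workload = TypedWorkload<armnn::DataType::QAsymmU8>;

// Workload.cpp: member definitions, then emit the specialisation the alias uses.
template class TypedWorkload<armnn::DataType::QAsymmU8>;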
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index e9edc8901e..f98fe44039 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -43,7 +43,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
case DataType::Float32:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
break;
case DataType::QuantizedSymm8PerAxis:
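The switch this hunk edits copies raw payload bytes, so each enumerator must be paired with the matching C++ element type. A hedged sketch of that pairing, modelled on armnn's CompatibleTypes helper (assumed shape, not a verbatim copy):

template <typename T>
bool CompatibleTypes(armnn::DataType)
{
    return false;   // unknown pairings are rejected by default
}

template <>
bool CompatibleTypes<uint8_t>(armnn::DataType dataType)
{
    // QAsymmU8 carries an unsigned 8-bit payload; Boolean shares the width.
    return dataType == armnn::DataType::Boolean || dataType == armnn::DataType::QAsymmU8;
}

template <>
bool CompatibleTypes<int16_t>(armnn::DataType dataType)
{
    return dataType == armnn::DataType::QSymmS16;   // signed 16-bit payload
}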
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index ebcd1f633e..4767aa0b3b 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -75,8 +75,8 @@ bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo&
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -105,8 +105,8 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -168,8 +168,8 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -202,8 +202,8 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const
std::array<DataType, 4> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Signed32
};
@@ -232,8 +232,8 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
@@ -280,8 +280,8 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -322,8 +322,8 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
@@ -351,8 +351,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -377,8 +377,8 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
{
DataType::Float32,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -439,8 +439,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -453,11 +453,11 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
"Reference convolution2d: input and output types mismatched.");
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
std::array<DataType, 2> supportedWeightTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
@@ -500,8 +500,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -528,8 +528,8 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -558,8 +558,8 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -572,11 +572,11 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
"Reference DepthwiseConvolution2d: input and output types mismatched.");
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
std::array<DataType, 2> supportedWeightTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
@@ -616,9 +616,9 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
bool supported = true;
std::array<DataType,3> supportedInputTypes = {
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
@@ -655,8 +655,8 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
std::array<DataType,3> supportedInputTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
@@ -688,8 +688,8 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -754,7 +754,7 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -780,8 +780,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -834,8 +834,8 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -913,8 +913,8 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
@@ -979,7 +979,7 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
std::array<DataType,2> supportedTypes = {
DataType::Float32,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
// check inputs and outputs
@@ -1081,8 +1081,8 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1119,8 +1119,8 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1185,8 +1185,8 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Boolean
};
@@ -1212,8 +1212,8 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1247,8 +1247,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1284,8 +1284,8 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
@@ -1322,8 +1322,8 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1350,8 +1350,8 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
std::array<DataType,3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1379,8 +1379,8 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1411,9 +1411,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
// Define supported output types.
std::array<DataType,3> supportedOutputTypes = {
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
"Reference quantize: output type not supported.");
@@ -1435,8 +1435,8 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
DataType::Float32,
DataType::Float16,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
"Reference reshape: input type not supported.");
@@ -1451,8 +1451,8 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1478,8 +1478,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1503,8 +1503,8 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1533,8 +1533,8 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
std::array<DataType, 3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1560,8 +1560,8 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1587,8 +1587,8 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1616,8 +1616,8 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1642,8 +1642,8 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1663,8 +1663,8 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1693,8 +1693,8 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -1723,8 +1723,8 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
std::array<DataType,3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1749,8 +1749,8 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1785,8 +1785,8 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1821,8 +1821,8 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1836,11 +1836,11 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
std::array<DataType, 2> supportedWeightTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
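Every hunk in RefLayerSupport.cpp follows the same pattern: a supported-type array whose Quantised* entries become QAsymmU8 or QSymmS16, fed to CheckSupportRule(TypeAnyOf(...)). A hedged sketch of the rule being constructed there (assumed shape; requires <algorithm> for std::any_of):

struct TypeAnyOf
{
    template <typename Container>
    TypeAnyOf(const armnn::TensorInfo& info, const Container& supported)
    {
        m_Res = std::any_of(supported.begin(), supported.end(),
                            [&info](armnn::DataType dt) { return dt == info.GetDataType(); });
    }
    bool m_Res;   // CheckSupportRule folds this into the running 'supported' flag
};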
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index dffb13db2d..b3a0c859fb 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -51,7 +51,7 @@ bool IsFloat16(const WorkloadInfo& info)
bool IsQSymm16(const WorkloadInfo& info)
{
- return IsDataType<DataType::QuantisedSymm16>(info);
+ return IsDataType<DataType::QSymmS16>(info);
}
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
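IsQSymm16 above is one of a family of thin wrappers over a generic predicate; the hunk only swaps the enumerator it tests for. A hedged sketch of the predicate (assumed body; the call shape matches the line above, and <algorithm> is needed for std::any_of):

template <armnn::DataType ArmnnType>
bool IsDataType(const armnn::WorkloadInfo& info)
{
    auto checkType = [](const armnn::TensorInfo& tensorInfo)
    {
        return tensorInfo.GetDataType() == ArmnnType;
    };
    // True when any input or output tensor of the workload carries the type.
    return std::any_of(info.m_InputTensorInfos.begin(),  info.m_InputTensorInfos.end(),  checkType)
        || std::any_of(info.m_OutputTensorInfos.begin(), info.m_OutputTensorInfos.end(), checkType);
}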
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 580d8550f0..23a8e9b9e9 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -68,7 +68,7 @@ BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
- RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
}
template <typename WorkloadType,
@@ -101,7 +101,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
@@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
@@ -133,7 +133,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
@@ -157,7 +157,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
@@ -165,7 +165,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
@@ -189,7 +189,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
DivisionQueueDescriptor,
DivisionLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
@@ -197,7 +197,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
DivisionQueueDescriptor,
DivisionLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
@@ -255,25 +255,25 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
(DataLayout::NHWC);
}
@@ -358,8 +358,8 @@ static void RefCreateFullyConnectedWorkloadTest()
auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
CheckInputOutput(std::move(workload),
TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
TensorInfo({ 3, 7 }, DataType, outputQScale));
@@ -372,12 +372,12 @@ BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
- RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
- RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
}
template <typename NormalizationWorkloadType, armnn::DataType DataType>
@@ -419,22 +419,22 @@ BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
template <typename Pooling2dWorkloadType, armnn::DataType DataType>
@@ -477,22 +477,22 @@ BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
template <typename SoftmaxWorkloadType, armnn::DataType DataType>
@@ -521,12 +521,12 @@ BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
- RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
- RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
}
template <typename SplitterWorkloadType, armnn::DataType DataType>
@@ -563,7 +563,7 @@ BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
- RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
}
template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
@@ -611,7 +611,7 @@ BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
- RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QuantisedAsymm8>();
+ RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
}
template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
@@ -661,7 +661,7 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
@@ -704,12 +704,12 @@ BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
- RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
{
- RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
@@ -744,12 +744,12 @@ BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16)
BOOST_AUTO_TEST_CASE(CreateRsqrtUint8)
{
- RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateRsqrtQsymm16)
{
- RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QSymmS16>();
}
template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
@@ -777,12 +777,12 @@ BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
{
- RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
{
- RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
}
template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
@@ -825,22 +825,22 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
template <typename ReshapeWorkloadType, armnn::DataType DataType>
@@ -864,12 +864,12 @@ BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
- RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
- RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
template <typename ConcatWorkloadType, armnn::DataType DataType>
@@ -898,12 +898,12 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
@@ -913,7 +913,7 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
@@ -923,7 +923,7 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
@@ -933,7 +933,7 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
template <typename ConstantWorkloadType, armnn::DataType DataType>
@@ -951,12 +951,12 @@ static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
{
- RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 });
+ RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
}
BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
{
- RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedSymm16>({ 2, 3, 2, 10 });
+ RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
}
BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
@@ -1001,12 +1001,12 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
- RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedAsymm8);
+ RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
}
BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
{
- RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedSymm16);
+ RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
}
BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
@@ -1026,14 +1026,14 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
{
BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
- armnn::DataType::QuantisedAsymm8),
+ armnn::DataType::QAsymmU8),
armnn::InvalidArgumentException);
}
BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
{
BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
- armnn::DataType::QuantisedSymm16),
+ armnn::DataType::QSymmS16),
armnn::InvalidArgumentException);
}
@@ -1062,12 +1062,12 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
{
- RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
{
- RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
template <armnn::DataType DataType>
@@ -1103,12 +1103,12 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- RefCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
{
- RefCreateStackWorkloadTest<armnn::DataType::QuantisedSymm16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_SUITE_END()
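The hunks above illustrate how mechanical the rename is in the test suites: every test that is parameterised on armnn::DataType keeps its logic unchanged and merely instantiates the template with the new enumerator spelling. A minimal sketch of that pattern, assuming only armnn/Types.hpp (the real test templates in CreateWorkload.hpp are not reproduced here):

    #include <armnn/Types.hpp>

    // Sketch: a test template parameterised on the quantized data type.
    // After this change it is instantiated with QAsymmU8/QSymmS16 in place
    // of the old QuantisedAsymm8/QuantisedSymm16 spellings.
    template <armnn::DataType DataType>
    void RunQuantizedWorkloadTest()
    {
        static_assert(DataType == armnn::DataType::QAsymmU8 ||
                      DataType == armnn::DataType::QSymmS16,
                      "expected a quantized data type");
        // ... build the layer, create the workload, check tensor infos ...
    }

    int main()
    {
        RunQuantizedWorkloadTest<armnn::DataType::QAsymmU8>(); // was QuantisedAsymm8
        RunQuantizedWorkloadTest<armnn::DataType::QSymmS16>(); // was QuantisedSymm16
    }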
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 4d8c82d900..75eccdee88 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -37,12 +37,12 @@ BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32)
BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8)
{
- AbsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ AbsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16)
{
- AbsEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ AbsEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}
// Constant
@@ -75,12 +75,12 @@ BOOST_AUTO_TEST_CASE(Unsigned8)
softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// Sets the tensors in the network.
- TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
inputTensorInfo.SetQuantizationOffset(100);
inputTensorInfo.SetQuantizationScale(10000.0f);
input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
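The Unsigned8 test above attaches a quantization offset and scale to each QAsymmU8 tensor. Those two fields encode the usual per-tensor affine mapping, real = scale * (q - offset); a free-standing sketch of the dequantize direction (illustrative only, not an armnn API):

    #include <cstdint>

    // Dequantize one QAsymmU8 value: real = scale * (q - offset).
    float Dequantize(uint8_t q, float scale, int32_t offset)
    {
        return scale * (static_cast<int32_t>(q) - offset);
    }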
@@ -369,7 +369,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Equal,
expectedOutput);
}
@@ -379,7 +379,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
@@ -409,7 +409,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Equal,
expectedOutput);
}
@@ -419,7 +419,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
@@ -431,12 +431,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NHWCTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
@@ -446,12 +446,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
@@ -461,12 +461,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
@@ -476,12 +476,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
@@ -491,7 +491,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
{
- ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
@@ -501,7 +501,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
{
- ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
@@ -511,7 +511,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
{
- ConcatDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
@@ -521,7 +521,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
{
- ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
@@ -531,12 +531,12 @@ BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
BOOST_AUTO_TEST_CASE(RefGatherUint8Test)
{
- GatherEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ GatherEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherInt16Test)
{
- GatherEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ GatherEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
@@ -546,12 +546,12 @@ BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
{
- GatherMultiDimEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ GatherMultiDimEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test)
{
- GatherMultiDimEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ GatherMultiDimEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}
// DepthToSpace
@@ -567,12 +567,12 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
@@ -587,33 +587,33 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
@@ -697,7 +697,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test)
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
@@ -775,7 +775,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
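Both DetectionPostProcess tests quantize their float fixtures with QuantizeData() before driving the QAsymmU8 end-to-end path. Assuming the conventional q = round(real / scale) + offset with saturation to the uint8 range, the conversion looks roughly like this (hypothetical helper, shown only to make the direction of the conversion explicit):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Quantize one float to QAsymmU8, saturating to [0, 255].
    uint8_t Quantize(float real, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::round(real / scale)) + offset;
        return static_cast<uint8_t>(std::clamp(q, 0, 255));
    }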
@@ -793,12 +793,12 @@ BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestFloat32)
BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestUint8)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestQSymm16)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest1)
@@ -829,7 +829,7 @@ BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test)
{
- Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest)
@@ -844,12 +844,12 @@ BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test)
{
- Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test)
{
- Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest)
@@ -869,17 +869,17 @@ BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test)
{
- Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test)
{
- Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test)
{
- Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest)
@@ -904,22 +904,22 @@ BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test)
{
- Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test)
{
- Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test)
{
- Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
{
- Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// TransposeConvolution2d
@@ -931,13 +931,13 @@ BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
@@ -949,13 +949,13 @@ BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
@@ -967,12 +967,12 @@ BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNchwTest)
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
@@ -982,12 +982,12 @@ BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Resize NearestNeighbor
@@ -998,12 +998,12 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNchwTest)
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
@@ -1013,12 +1013,12 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// InstanceNormalization
@@ -1050,7 +1050,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
{
- ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
@@ -1060,7 +1060,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
{
- ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
@@ -1070,7 +1070,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
{
- ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
@@ -1081,7 +1081,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
{
- ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
@@ -1091,7 +1091,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
{
- ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
@@ -1102,7 +1102,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
{
- ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
@@ -1112,7 +1112,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
{
- ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
@@ -1123,7 +1123,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
{
- ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
@@ -1133,7 +1133,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
{
- ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
@@ -1144,7 +1144,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
{
- ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
#if !defined(__ANDROID__)
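A rename of public enumerators like this typically keeps the old spellings compiling for a deprecation period by aliasing them to the new values. A sketch of that pattern (illustrative only; armnn's actual definition lives in include/armnn/Types.hpp, which is not shown in this section, and the numeric values here are placeholders):

    // Sketch of a source-compatible enum rename (C++17 attribute syntax).
    enum class DataType
    {
        QAsymmU8 = 2,
        QSymmS16 = 5,

        QuantisedAsymm8 [[deprecated("Use DataType::QAsymmU8 instead.")]] = QAsymmU8,
        QuantisedSymm16 [[deprecated("Use DataType::QSymmS16 instead.")]] = QSymmS16,
    };

With such an alias in place, code still using the old spellings (the "-" lines throughout this diff) would keep compiling, with a warning steering it toward the new names.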
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 106a7f6157..ab0809d90b 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -63,13 +63,13 @@ BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
{
armnn::RefWorkloadFactory factory;
- IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+ IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
{
armnn::RefWorkloadFactory factory;
- IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedSymm16>(&factory);
+ IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
}
BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index b88f432acf..b0d8db802e 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -79,19 +79,19 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int16,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt16,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
@@ -104,19 +104,19 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int16,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt16,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
@@ -129,19 +129,19 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int16,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt16,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
@@ -180,19 +180,19 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int16,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
@@ -205,19 +205,19 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
@@ -465,11 +465,11 @@ ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<DataType::QuantisedSymm16>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<DataType::QSymmS16>, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<DataType::QuantisedSymm16>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<DataType::QSymmS16>, true)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
@@ -493,9 +493,9 @@ ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
ARMNN_AUTO_TEST_CASE(ConcatUint16, ConcatUint16Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, true)
+ ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, true)
ARMNN_AUTO_TEST_CASE(ConcatInt16DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>, true)
+ ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>, true)
// Add
ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
@@ -681,10 +681,10 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16,
SimpleResizeBilinearTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ResizeBilinearNopTest<DataType::Float32>,
@@ -693,10 +693,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16,
ResizeBilinearNopTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(esizeBilinearNopUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ResizeBilinearSqMinTest<DataType::Float32>,
@@ -705,10 +705,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16,
ResizeBilinearSqMinTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ResizeBilinearMinTest<DataType::Float32>,
@@ -717,10 +717,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16,
ResizeBilinearMinTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
ResizeBilinearMagTest<DataType::Float32>,
@@ -729,10 +729,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16,
ResizeBilinearMagTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NCHW)
// Resize Bilinear - NHWC
@@ -743,10 +743,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16,
ResizeBilinearNopTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
SimpleResizeBilinearTest<DataType::Float32>,
@@ -755,10 +755,10 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16,
SimpleResizeBilinearTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ResizeBilinearSqMinTest<DataType::Float32>,
@@ -767,10 +767,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16,
ResizeBilinearSqMinTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ResizeBilinearMinTest<DataType::Float32>,
@@ -779,10 +779,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16,
ResizeBilinearMinTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
ResizeBilinearMagTest<DataType::Float32>,
@@ -791,10 +791,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16,
ResizeBilinearMagTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
// Resize NearestNeighbor - NCHW
@@ -802,46 +802,46 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(esizeNearestNeighborNopUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
// Resize NearestNeighbor - NHWC
@@ -849,46 +849,46 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
// Fake Quantization
@@ -952,10 +952,10 @@ ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
// Constant
ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
@@ -1011,12 +1011,12 @@ ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test,
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(SimpleFloorFloat16, SimpleFloorTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QSymmS16>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
// Rsqrt
@@ -1026,24 +1026,24 @@ ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QSymmS16>)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQSymm16, SimplePermuteTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQSymm16, SimplePermuteTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet1Test, PermuteValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QSymmS16>)
// Lstm
BOOST_AUTO_TEST_CASE(LstmUtilsZeroVector) {
@@ -1090,21 +1090,21 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedSymm16, MeanSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedSymm16, MeanSimpleAxisTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedSymm16, MeanKeepDimsTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedSymm16, MeanMultipleDimsTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedSymm16, MeanVts1Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedSymm16, MeanVts2Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedSymm16, MeanSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedSymm16, MeanSimpleAxisTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedSymm16, MeanKeepDimsTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedSymm16, MeanMultipleDimsTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedSymm16, MeanVts1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedSymm16, MeanVts2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(AdditionAfterMaxPool, AdditionAfterMaxPoolTest)
@@ -1123,15 +1123,15 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelSigned32, ArgMaxChannelTest<DataType::Signed32
ARMNN_AUTO_TEST_CASE(ArgMaxHeightSigned32, ArgMaxHeightTest<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedSymm16, ArgMaxSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedSymm16, ArgMinSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedSymm16, ArgMinChannelTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedSymm16, ArgMaxChannelTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedSymm16, ArgMaxSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedSymm16, ArgMinSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedSymm16, ArgMinChannelTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedSymm16, ArgMaxChannelTest<DataType::QSymmS16>)
// Space To Batch Nd
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
@@ -1191,21 +1191,21 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5<DataTy
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint4, BatchToSpaceNdNhwcTest4<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint5, BatchToSpaceNdNhwcTest5<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint6, BatchToSpaceNdNhwcTest6<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint7, BatchToSpaceNdNhwcTest7<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_1, BatchToSpaceNdNhwcTest1<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_2, BatchToSpaceNdNhwcTest2<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_3, BatchToSpaceNdNhwcTest3<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_4, BatchToSpaceNdNhwcTest4<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5, BatchToSpaceNdNhwcTest5<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_6, BatchToSpaceNdNhwcTest6<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7, BatchToSpaceNdNhwcTest7<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint4, BatchToSpaceNdNhwcTest4<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint5, BatchToSpaceNdNhwcTest5<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint6, BatchToSpaceNdNhwcTest6<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint7, BatchToSpaceNdNhwcTest7<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_1, BatchToSpaceNdNhwcTest1<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_2, BatchToSpaceNdNhwcTest2<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_3, BatchToSpaceNdNhwcTest3<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_4, BatchToSpaceNdNhwcTest4<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5, BatchToSpaceNdNhwcTest5<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_6, BatchToSpaceNdNhwcTest6<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7, BatchToSpaceNdNhwcTest7<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_1, BatchToSpaceNdNchwTest1<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_2, BatchToSpaceNdNchwTest2<DataType::Float16>)
@@ -1215,21 +1215,21 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5<DataTy
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint4, BatchToSpaceNdNchwTest4<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint5, BatchToSpaceNdNchwTest5<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint6, BatchToSpaceNdNchwTest6<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint7, BatchToSpaceNdNchwTest7<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_1, BatchToSpaceNdNchwTest1<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_2, BatchToSpaceNdNchwTest2<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_3, BatchToSpaceNdNchwTest3<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_4, BatchToSpaceNdNchwTest4<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_5, BatchToSpaceNdNchwTest5<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_6, BatchToSpaceNdNchwTest6<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_7, BatchToSpaceNdNchwTest7<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint4, BatchToSpaceNdNchwTest4<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint5, BatchToSpaceNdNchwTest5<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint6, BatchToSpaceNdNchwTest6<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint7, BatchToSpaceNdNchwTest7<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_1, BatchToSpaceNdNchwTest1<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_2, BatchToSpaceNdNchwTest2<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_3, BatchToSpaceNdNchwTest3<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_4, BatchToSpaceNdNchwTest4<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_5, BatchToSpaceNdNchwTest5<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_6, BatchToSpaceNdNchwTest6<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_7, BatchToSpaceNdNchwTest7<DataType::QSymmS16>)
// DepthToSpace
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
@@ -1242,15 +1242,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
@@ -1262,15 +1262,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// SpaceToDepth
ARMNN_AUTO_TEST_CASE(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test)
@@ -1381,10 +1381,10 @@ ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Abs2dFloat16, Abs2dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Abs3dFloat16, Abs3dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(Abs3dQuantisedSymm16, Abs3dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedSymm16, Abs3dTest<DataType::QSymmS16>)
// Detection PostProcess
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
@@ -1398,22 +1398,22 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
{
DetectionPostProcessRegularNmsQuantizedTest<
- RefWorkloadFactory, DataType::QuantisedAsymm8>();
+ RefWorkloadFactory, DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
{
DetectionPostProcessFastNmsQuantizedTest<
- RefWorkloadFactory, DataType::QuantisedAsymm8>();
+ RefWorkloadFactory, DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt16)
{
DetectionPostProcessRegularNmsQuantizedTest<
- RefWorkloadFactory, DataType::QuantisedSymm16>();
+ RefWorkloadFactory, DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
{
DetectionPostProcessFastNmsQuantizedTest<
- RefWorkloadFactory, DataType::QuantisedSymm16>();
+ RefWorkloadFactory, DataType::QSymmS16>();
}
// Dequantize
@@ -1434,8 +1434,8 @@ ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest<DataType::QSymmS16>)
// Slice
ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
@@ -1463,19 +1463,19 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt16Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt16Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -1488,19 +1488,19 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt16Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt16Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -1513,19 +1513,19 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt16Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt16Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -1538,19 +1538,19 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt16Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt16Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -1563,19 +1563,19 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt16Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt16Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -1588,19 +1588,19 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt16Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt16Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
true,
DataLayout::NHWC)
@@ -1611,16 +1611,16 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nchw,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nhwc,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(TransposeConvolution2dPerAxisQuantTestNchw,
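Taken together, the RefLayerTests changes are a pure rename: QuantisedAsymm8 becomes QAsymmU8 and QuantisedSymm16 becomes QSymmS16, making the signedness of the payload explicit in the enumerator name. For reference, a minimal self-contained sketch of what the two schemes mean (the Dequantize helpers below are illustrative only, not the armnn API):

#include <cstdint>

// QAsymmU8 (was QuantisedAsymm8): asymmetric quantization over an unsigned
// 8-bit payload; real = scale * (quantized - offset), quantized in [0, 255].
float DequantizeQAsymmU8(uint8_t quantized, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(quantized) - offset);
}

// QSymmS16 (was QuantisedSymm16): symmetric quantization over a signed
// 16-bit payload; the zero point is fixed at 0, so real = scale * quantized.
float DequantizeQSymmS16(int16_t quantized, float scale)
{
    return scale * static_cast<float>(quantized);
}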
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 9d41c9e9e7..faabdcdb3f 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -79,14 +79,14 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
params.second,
params.first);
}
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
{
return std::make_unique<QASymm8Decoder>(
static_cast<const uint8_t*>(data),
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
{
return std::make_unique<QSymm16Decoder>(
static_cast<const int16_t*>(data),
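The Decoders.hpp hunk renames the cases in MakeDecoder's switch on DataType. For context, reference workloads use the returned Decoder<float> to walk a quantized tensor as floats; a hedged usage sketch (the variable names are assumptions, while MakeDecoder and Decoder<float> are the interfaces shown above):

auto decoder = MakeDecoder<float>(inputInfo, inputData);
for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
{
    const float value = decoder->Get(); // dequantizes QAsymmU8/QSymmS16/... on read
    // ... consume value ...
    ++(*decoder);                       // advance to the next element
}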
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 92493ed641..4fe202f0bf 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -30,7 +30,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
params.second,
params.first);
}
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
return std::make_unique<QASymm8Encoder>(
static_cast<uint8_t*>(data),
@@ -44,7 +44,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
return std::make_unique<QSymm16Encoder>(
static_cast<int16_t*>(data),
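MakeEncoder mirrors MakeDecoder on the write side, requantizing floats into the output tensor's payload type. A sketch of the usual decode-compute-encode loop in a reference workload, under the assumption of a single input and a single output tensor (the doubling op is a placeholder):

auto decoder = MakeDecoder<float>(inputInfo, inputData);
auto encoder = MakeEncoder<float>(outputInfo, outputData);
for (unsigned int i = 0; i < outputInfo.GetNumElements(); ++i)
{
    encoder->Set(2.0f * decoder->Get()); // Set() requantizes to QAsymmU8/QSymmS16/...
    ++(*decoder);
    ++(*encoder);
}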
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index 2a3883f8f7..9572f9a2a5 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -46,7 +46,7 @@ void RefDebugWorkload<DataType>::RegisterDebugCallback(const DebugCallbackFuncti
template class RefDebugWorkload<DataType::Float16>;
template class RefDebugWorkload<DataType::Float32>;
-template class RefDebugWorkload<DataType::QuantisedAsymm8>;
-template class RefDebugWorkload<DataType::QuantisedSymm16>;
+template class RefDebugWorkload<DataType::QAsymmU8>;
+template class RefDebugWorkload<DataType::QSymmS16>;
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 0964515b2c..fc154e9457 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -39,7 +39,7 @@ private:
using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
-using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QuantisedAsymm8>;
-using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QuantisedSymm16>;
+using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
+using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index c4b9daeb4c..356f6b1172 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -35,7 +35,7 @@ void RefPadWorkload<DataType>::Execute() const
template class RefPadWorkload<DataType::Float32>;
template class RefPadWorkload<DataType::Float16>;
-template class RefPadWorkload<DataType::QuantisedAsymm8>;
-template class RefPadWorkload<DataType::QuantisedSymm16>;
+template class RefPadWorkload<DataType::QAsymmU8>;
+template class RefPadWorkload<DataType::QSymmS16>;
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index d1521f4f8d..28fb55386e 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -32,7 +32,7 @@ public:
using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
-using RefPadQAsymm8Workload = RefPadWorkload<DataType::QuantisedAsymm8>;
-using RefPadQSymm16Workload = RefPadWorkload<DataType::QuantisedSymm16>;
+using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
+using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index 4e7b76bf0a..d0e1431ffd 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -30,7 +30,7 @@ void RefPermuteWorkload<DataType>::Execute() const
template class RefPermuteWorkload<DataType::Float16>;
template class RefPermuteWorkload<DataType::Float32>;
-template class RefPermuteWorkload<DataType::QuantisedAsymm8>;
-template class RefPermuteWorkload<DataType::QuantisedSymm16>;
+template class RefPermuteWorkload<DataType::QAsymmU8>;
+template class RefPermuteWorkload<DataType::QSymmS16>;
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index 1e69afb0ed..00a33850aa 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -29,7 +29,7 @@ public:
using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
-using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QuantisedAsymm8>;
-using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QuantisedSymm16>;
+using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
+using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>;
} //namespace armnn
\ No newline at end of file
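RefDebugWorkload, RefPadWorkload and RefPermuteWorkload (above) all follow the same idiom, which is why the rename touches each .cpp/.hpp pair twice: the class template is explicitly instantiated per payload type in the source file and aliased in the header. A condensed sketch of that idiom, using a hypothetical MyWorkload:

#include <armnn/Types.hpp>

template <armnn::DataType DataType>
class MyWorkload
{
    // Execute() would select uint8_t/int16_t handling based on DataType.
};

// .cpp: one explicit instantiation per supported quantized payload
template class MyWorkload<armnn::DataType::QAsymmU8>;
template class MyWorkload<armnn::DataType::QSymmS16>;

// .hpp: aliases that factories and tests can refer to by name
using MyQAsymm8Workload = MyWorkload<armnn::DataType::QAsymmU8>;
using MyQSymm16Workload = MyWorkload<armnn::DataType::QSymmS16>;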
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index a78804b709..31534abe3e 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -43,7 +43,7 @@ void RefQuantizeWorkload::Execute() const
switch(m_TargetType)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
{
QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
break;
@@ -53,7 +53,7 @@ void RefQuantizeWorkload::Execute() const
QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
break;
}
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
{
QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
break;
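Note how the QSymmS16 case above passes a hard-coded offset of 0: symmetric quantization has no zero point. A hedged sketch of what a QuantizeImpl-style helper does (armnn::Quantize is the real clamp-and-round helper from armnn/TypesUtils.hpp; the loop itself is illustrative, not the workload's actual body):

#include <armnn/TypesUtils.hpp>

template <typename T>
void QuantizeImplSketch(const float* input, T* output, unsigned int numElements,
                        float scale, int32_t offset)
{
    for (unsigned int i = 0; i < numElements; ++i)
    {
        // round(input / scale) + offset, clamped to T's representable range
        output[i] = armnn::Quantize<T>(input[i], scale, offset);
    }
}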
diff --git a/tests/DeepSpeechV1Database.hpp b/tests/DeepSpeechV1Database.hpp
index a690e3fece..81523775db 100644
--- a/tests/DeepSpeechV1Database.hpp
+++ b/tests/DeepSpeechV1Database.hpp
@@ -83,7 +83,7 @@ inline auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
}
template<>
-inline auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+inline auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
const float& quantizationScale,
const int32_t& quantizationOffset)
{
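This ParseDataArray specialisation quantizes text input on the fly rather than parsing raw integers. Its body is truncated by the diff context; a sketch of its likely shape, assuming the ParseArrayImpl helper used by the sibling specialisations (the lambda body is illustrative):

return ParseArrayImpl<uint8_t>(stream,
    [quantizationScale, quantizationOffset](const std::string& s)
    {
        return armnn::Quantize<uint8_t>(std::stof(s), quantizationScale, quantizationOffset);
    });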
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index 4e8fe78ad8..9c23200802 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -225,7 +225,7 @@ public:
}
else if (m_OutputType == "qasymm8")
{
- return armnn::DataType::QuantisedAsymm8;
+ return armnn::DataType::QAsymmU8;
}
else
{
@@ -272,7 +272,7 @@ int main(int argc, char* argv[])
imageDataContainers.push_back(PrepareImageTensor<int>(
imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
break;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
imageDataContainers.push_back(PrepareImageTensor<uint8_t>(
imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
break;
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index b1cb5e36f5..4793f822fb 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -60,7 +60,7 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode
case armnn::DataType::Signed32:
normParams.mean = { 128.0, 128.0, 128.0 };
break;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
default:
break;
}
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index 0d7d7689e3..ecfc21209c 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -322,7 +322,7 @@ int main(int argc, char* argv[])
inputTensorDataLayout));
outputDataContainers = { vector<int>(outputNumElements) };
break;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
inputDataContainers.push_back(
PrepareImageTensor<uint8_t>(imagePath.string(),
inputTensorWidth, inputTensorHeight,
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index f9e9b146d4..ff460dd85e 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -145,14 +145,14 @@ auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
}
template<>
-auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream)
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
{
return ParseArrayImpl<uint8_t>(stream,
[](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
}
template<>
-auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
const float& quantizationScale,
const int32_t& quantizationOffset)
{
@@ -309,8 +309,8 @@ void PopulateTensorWithData(TContainer& tensorData,
const int qOffset = qParams.value().second;
tensorData = readFromFile ?
- ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile, qScale, qOffset) :
- GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
+ ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
+ GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
}
else
{
@@ -328,8 +328,8 @@ void PopulateTensorWithData(TContainer& tensorData,
else if (dataTypeStr.compare("qasymm8") == 0)
{
tensorData = readFromFile ?
- ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile) :
- GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
+ ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
}
else
{