Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp | 5
-rw-r--r--  src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp | 233
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt | 1
-rw-r--r--  src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp | 22
-rw-r--r--  src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp | 58
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp | 16
-rw-r--r--  src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp | 124
-rw-r--r--  src/backends/backendsCommon/test/PadEndToEndTestImpl.hpp | 313
-rw-r--r--  src/backends/backendsCommon/test/ReduceEndToEndTestImpl.hpp | 195
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp | 77
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp | 16
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp | 865
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp | 137
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp | 119
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp | 119
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 1
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp | 324
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp | 37
-rw-r--r--  src/backends/reference/workloads/Broadcast.cpp | 24
-rw-r--r--  src/backends/reference/workloads/ElementwiseFunction.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Maximum.hpp | 22
-rw-r--r--  src/backends/reference/workloads/Pad.cpp | 82
-rw-r--r--  src/backends/reference/workloads/RefCastWorkload.cpp | 67
-rw-r--r--  src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp | 20
-rw-r--r--  src/backends/reference/workloads/Slice.cpp | 26
-rw-r--r--  src/backends/reference/workloads/StridedSlice.cpp | 103
-rw-r--r--  src/backends/tosaCommon/TosaMappings.cpp | 68
-rw-r--r--  src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.cpp | 262
-rw-r--r--  src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.hpp | 17
-rw-r--r--  src/backends/tosaCommon/operatorMappings/CMakeLists.txt | 18
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.hpp | 16
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ExpOperator.cpp | 118
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ExpOperator.hpp | 16
-rw-r--r--  src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.cpp | 189
-rw-r--r--  src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.hpp | 16
-rw-r--r--  src/backends/tosaCommon/operatorMappings/GeluOperator.cpp | 108
-rw-r--r--  src/backends/tosaCommon/operatorMappings/GeluOperator.hpp | 20
-rw-r--r--  src/backends/tosaCommon/operatorMappings/LogOperator.cpp | 137
-rw-r--r--  src/backends/tosaCommon/operatorMappings/LogOperator.hpp | 16
-rw-r--r--  src/backends/tosaCommon/operatorMappings/PadOperator.cpp | 70
-rw-r--r--  src/backends/tosaCommon/operatorMappings/PadOperator.hpp | 16
-rw-r--r--  src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp | 8
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ReduceOperator.cpp | 178
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ReduceOperator.hpp | 13
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ReluOperator.cpp | 53
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ResizeOperator.hpp | 10
-rw-r--r--  src/backends/tosaCommon/operatorMappings/RsqrtOperator.cpp (renamed from src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp) | 39
-rw-r--r--  src/backends/tosaCommon/operatorMappings/RsqrtOperator.hpp | 16
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp | 9
-rw-r--r--  src/backends/tosaCommon/operatorMappings/TosaTableUtils.hpp | 96
-rw-r--r--  src/backends/tosaCommon/test/FullyConnectedChecker.hpp | 101
-rw-r--r--  src/backends/tosaCommon/test/OneToManyMappingTests.cpp | 135
-rw-r--r--  src/backends/tosaCommon/test/OneToOneMappingTests.cpp | 1
-rw-r--r--  src/backends/tosaReference/TosaRefLayerSupport.cpp | 15
-rw-r--r--  src/backends/tosaReference/test/TosaRefEndToEndTests.cpp | 365
55 files changed, 4898 insertions, 238 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 5334641803..9b40cc7d59 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1074,6 +1074,7 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -3642,9 +3643,9 @@ void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
const unsigned int rank = inputTensorInfo.GetNumDimensions();
- if (rank > 4)
+ if (rank > 5)
{
- throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
+ throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 5 are not supported.");
}
ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
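Note: the hunk above relaxes Slice validation from rank 4 to rank 5, in line with the rank-5 handling added to the reference Slice workload later in this patch. A minimal sketch of building a rank-5 slice through the public API follows; the shapes, begin/size values and layer names are illustrative only and are not taken from the patch:

    // Hypothetical rank-5 slice: cut a 1x1x2x1x1 window out of a 2x2x2x2x2 input.
    armnn::SliceDescriptor sliceDesc;
    sliceDesc.m_Begin = { 0, 0, 0, 0, 0 };
    sliceDesc.m_Size  = { 1, 1, 2, 1, 1 };

    armnn::INetworkPtr net = armnn::INetwork::Create();
    armnn::IConnectableLayer* input  = net->AddInputLayer(0, "input");
    armnn::IConnectableLayer* slice  = net->AddSliceLayer(sliceDesc, "slice5d");
    armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(slice->GetInputSlot(0));
    slice->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 2, 2, 2, 2, 2 }, armnn::DataType::Float32));
    slice->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 2, 1, 1 }, armnn::DataType::Float32));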
diff --git a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
index 98e75cb8df..3fc81aa0a0 100644
--- a/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -19,7 +19,9 @@ armnn::INetworkPtr CreateBatchMatMulNetwork(const armnn::TensorShape& inputXShap
const armnn::TensorShape& inputYShape,
const armnn::TensorShape& outputShape,
const float qScale = 1.0f,
- const int32_t qOffset = 0)
+ const int32_t qOffset = 0,
+ const bool transposeX = true,
+ const bool transposeY = true)
{
using namespace armnn;
@@ -31,8 +33,8 @@ armnn::INetworkPtr CreateBatchMatMulNetwork(const armnn::TensorShape& inputXShap
TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
BatchMatMulDescriptor batchMatMulDesc;
- batchMatMulDesc.m_TransposeX = false;
- batchMatMulDesc.m_TransposeY = true;
+ batchMatMulDesc.m_TransposeX = transposeX;
+ batchMatMulDesc.m_TransposeY = transposeY;
IConnectableLayer* batchMatMul = network->AddBatchMatMulLayer(batchMatMulDesc, "batchMatMul");
IConnectableLayer* inputX = network->AddInputLayer(0, "inputX");
@@ -58,7 +60,12 @@ void BatchMatMulEndToEnd(const std::vector<armnn::BackendId>& backends)
constexpr float qScale = 1.0f;
constexpr int32_t qOffset = 0;
- INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape, inputYShape, outputShape, qScale, qOffset);
+ INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape,
+ inputYShape,
+ outputShape,
+ qScale,
+ qOffset,
+ false);
CHECK(network);
@@ -89,4 +96,220 @@ void BatchMatMulEndToEnd(const std::vector<armnn::BackendId>& backends)
EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
}
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void BatchMatMulNotSquareEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+ const TensorShape& inputXShape = { 2, 5, 3 };
+ const TensorShape& inputYShape = { 2, 3, 4 };
+ const TensorShape& outputShape = { 2, 5, 4 };
+
+ constexpr float qScale = 1.0f;
+ constexpr int32_t qOffset = 0;
+
+ INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape,
+ inputYShape,
+ outputShape, qScale, qOffset, false, false);
+
+ CHECK(network);
+
+ std::vector<float> floatInputXData{ 8, 8, 4,
+ 6, 1, 3,
+ 8, 8, 3,
+ 8, 9, 8,
+ 5, 4, 4,
+
+ 1, 8, 5,
+ 7, 1, 1,
+ 8, 7, 9,
+ 3, 2, 7,
+ 8, 5, 3 };
+ std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+ std::vector<float> floatInputYData{
+ 6, 2, 3, 2,
+ 6, 2, 2, 8,
+ 3, 7, 8, 1,
+
+ 7, 2, 9, 5,
+ 2, 3, 1, 3,
+ 2, 7, 7, 5 };
+ std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
+
+ std::vector<float> floatExpectedOutputData{
+ 108, 60, 72, 84,
+ 51, 35, 44, 23,
+ 105, 53, 64, 83,
+ 126, 90, 106, 96,
+ 66, 46, 55, 46,
+
+ 33, 61, 52, 54,
+ 53, 24, 71, 43,
+ 88, 100, 142, 106,
+ 39, 61, 78, 56,
+ 72, 52, 98, 70
+ };
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
+
+ std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+ std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void BatchMatMulNoTransposeEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+
+ const TensorShape& inputXShape = { 2, 2, 2 };
+ const TensorShape& inputYShape = { 2, 2, 2 };
+ const TensorShape& outputShape = { 2, 2, 2 };
+
+ constexpr float qScale = 1.0f;
+ constexpr int32_t qOffset = 0;
+
+ INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape,
+ inputYShape,
+ outputShape,
+ qScale,
+ qOffset,
+ false,
+ false);
+
+ CHECK(network);
+
+ std::vector<float> floatInputXData{ 1., 2.,
+ 3., 4.,
+
+ 9., 10.,
+ 11., 12. };
+ std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+ std::vector<float> floatInputYData{ 5., 7.,
+ 6., 8.,
+
+ 13., 15.,
+ 14., 16. };
+ std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
+
+ std::vector<float> floatExpectedOutputData{ 17., 23.,
+ 39., 53.,
+
+ 257., 295.,
+ 311., 357. };
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
+
+ std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+ std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void BatchMatMul4DEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+
+ const TensorShape& inputXShape = { 2, 2, 2, 2 };
+ const TensorShape& inputYShape = { 2, 2, 2, 2 };
+ const TensorShape& outputShape = { 2, 2, 2, 2 };
+
+ constexpr float qScale = 1.0f;
+ constexpr int32_t qOffset = 0;
+
+ INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape,
+ inputYShape,
+ outputShape,
+ qScale,
+ qOffset,
+ false,
+ false);
+
+ CHECK(network);
+
+ std::vector<float> floatInputXData{ 1., 2.,
+ 3., 4.,
+
+ 5., 6.,
+ 7., 8.,
+
+ 1., 2.,
+ 3., 4.,
+
+ 5., 6.,
+ 7., 8.};
+ std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+ std::vector<float> floatInputYData{ 10., 11.,
+ 12., 13.,
+
+ 14., 15.,
+ 16., 17.,
+
+ 10., 11.,
+ 12., 13.,
+
+ 14., 15.,
+ 16., 17. };
+ std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
+
+ std::vector<float> floatExpectedOutputData{ 34., 37.,
+ 78., 85.,
+
+ 166., 177.,
+ 226., 241.,
+
+ 34., 37.,
+ 78., 85.,
+
+ 166., 177.,
+ 226., 241. };
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
+
+ std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+ std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void BatchMatMulSimple4DEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+
+ const TensorShape& inputXShape = { 1, 1, 2, 2 };
+ const TensorShape& inputYShape = { 1, 1, 2, 2 };
+ const TensorShape& outputShape = { 1, 1, 2, 2 };
+
+ constexpr float qScale = 1.0f;
+ constexpr int32_t qOffset = 0;
+
+ INetworkPtr network = CreateBatchMatMulNetwork<ArmnnType>(inputXShape,
+ inputYShape,
+ outputShape,
+ qScale,
+ qOffset,
+ false,
+ false);
+
+ CHECK(network);
+
+ std::vector<float> floatInputXData{ 1., 2.,
+ 3., 4. };
+ std::vector<T> inputXData = armnnUtils::QuantizedVector<T>(floatInputXData, qScale, qOffset);
+
+ std::vector<float> floatInputYData{ 5., 7.,
+ 6., 8. };
+ std::vector<T> inputYData = armnnUtils::QuantizedVector<T>(floatInputYData, qScale, qOffset);
+
+ std::vector<float> floatExpectedOutputData{ 17., 23.,
+ 39., 53. };
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
+
+ std::map<int, std::vector<T>> inputTensor = {{ 0, inputXData }, {1, inputYData}};
+ std::map<int, std::vector<T>> expectedOutput = { { 0, expectedOutputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutput, backends);
+}
} // anonymous namespace
\ No newline at end of file
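The per-backend end-to-end suites (RefEndToEndTests.cpp, ClEndToEndTests.cpp, NeonEndToEndTests.cpp and TosaRefEndToEndTests.cpp) also change in this patch, which is where helpers like the ones above get registered. As a rough, hypothetical illustration only (suite and case names are made up, backend choice is illustrative), a doctest registration for one of the new helpers could look like:

    TEST_SUITE("RefEndToEnd")
    {
    TEST_CASE("RefBatchMatMulNotSquareEndToEndFloat32")
    {
        // Runs the non-square BatchMatMul end-to-end test on the reference backend.
        BatchMatMulNotSquareEndToEnd<armnn::DataType::Float32>({ armnn::Compute::CpuRef });
    }
    }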
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 4438021938..4fa130c2ff 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -45,6 +45,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
MultiplicationEndToEndTestImpl.hpp
OptimizeSubgraphViewTests.cpp
OptimizationViewsTests.cpp
+ PadEndToEndTestImpl.hpp
PreluEndToEndTestImpl.hpp
QLstmEndToEndTestImpl.cpp
QLstmEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
index f53f97ae88..b424db44f6 100644
--- a/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
@@ -24,8 +24,7 @@ armnn::INetworkPtr CreateConstConvolution2dNetwork(const armnn::Convolution2dDes
const armnn::TensorInfo& biasInfo,
const armnn::TensorInfo& outputInfo,
const armnn::ConstTensor& weights,
- const armnn::ConstTensor& biases,
- bool biasEnabled)
+ const armnn::ConstTensor& biases)
{
using namespace armnn;
@@ -38,7 +37,7 @@ armnn::INetworkPtr CreateConstConvolution2dNetwork(const armnn::Convolution2dDes
Connect(input, convolution2d, inputInfo, 0, 0);
Connect(weightsLayer, convolution2d, weightsInfo, 0, 1);
- if(biasEnabled)
+ if(descriptor.m_BiasEnabled)
{
armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
Connect(biasLayer, convolution2d, biasInfo, 0, 2);
@@ -85,15 +84,15 @@ void Convolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
3, 2, 1
};
- std::vector<float> biasesData = { 1 };
- float bias = biasEnabled ? biasesData[0] : 0;
+ std::vector<float> biasesData = biasEnabled ? std::vector<float>({ 1.0f })
+ : std::vector<float>({ 0.f });
std::vector<float> expectedOutputData =
- {
- 65 + bias, 76 + bias, 91 + bias,
- 107 + bias, 99 + bias, 89 + bias,
- 116 + bias, 98 + bias, 118 + bias
- };
+ {
+ 65 + biasesData[0], 76 + biasesData[0], 91 + biasesData[0],
+ 107 + biasesData[0], 99 + biasesData[0], 89 + biasesData[0],
+ 116 + biasesData[0], 98 + biasesData[0], 118 + biasesData[0]
+ };
Convolution2dDescriptor descriptor;
descriptor.m_PadLeft = 0;
@@ -127,8 +126,7 @@ void Convolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
biasesInfo,
outputInfo,
weights,
- biases,
- biasEnabled);
+ biases);
EndToEndLayerTestImpl<ArmnnIType, ArmnnOType>(std::move(network),
{{ 0, qInputData }},
diff --git a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
index 778b4823c3..07836be8f1 100644
--- a/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
+++ b/src/backends/backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp
@@ -31,23 +31,26 @@ armnn::INetworkPtr CreateDepthwiseConvolution2dNetwork(const armnn::DepthwiseCon
INetworkPtr network(INetwork::Create());
IConnectableLayer* input = network->AddInputLayer(0, "input");
armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
- armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
IConnectableLayer* convolution2d = network->AddDepthwiseConvolution2dLayer(descriptor, "depthwiseConvolution2d");
IConnectableLayer* output = network->AddOutputLayer(0, "output");
Connect(input, convolution2d, inputInfo, 0, 0);
Connect(weightsLayer, convolution2d, weightsInfo, 0, 1);
- Connect(biasLayer, convolution2d, biasInfo, 0, 2);
Connect(convolution2d, output, outputInfo, 0, 0);
+ if(descriptor.m_BiasEnabled)
+ {
+ armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+ Connect(biasLayer, convolution2d, biasInfo, 0, 2);
+ }
+
return network;
}
-} // anonymous namespace
-
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType = ArmnnType>
void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
- armnn::DataLayout dataLayout)
+ armnn::DataLayout dataLayout,
+ bool biasEnabled = true)
{
using namespace armnn;
using T = ResolveType<ArmnnType>;
@@ -86,6 +89,7 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
+
0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 1.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.5f, 0.0f,
@@ -125,7 +129,7 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
std::vector<float> biasesData = { 0.0f, 2.0f, 1.0f, -1.0f };
- std::vector<float> expectedOutputData =
+ std::vector<float> expectedOutputData = biasEnabled ? std::vector<float>(
{
3.0f, 4.5f, 2.0f, 1.0f, 3.0f, 4.5f, 3.0f, 1.0f, 3.0f, 4.5f, 4.0f, 3.0f, 3.0f, 4.5f,
1.0f, -1.0f, 3.0f, 4.5f, 1.0f, -1.0f, 3.0f, 4.5f, 1.0f, -1.0f, 3.0f, 4.5f, 1.0f, -1.0f,
@@ -133,25 +137,58 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
1.0f, -1.0f, 3.0f, 4.5f, 1.0f, -1.0f, 3.0f, 4.5f, 1.0f, -1.0f, 3.0f, 4.5f, 1.0f, -1.0f,
3.0f, 5.5f, 3.0f, 2.0f, 3.0f, 5.5f, 4.0f, 2.0f, 3.0f, 5.5f, 5.0f, 4.0f, 3.0f, 5.5f,
1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f,
+
3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f,
1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f, 3.0f, 5.5f, 1.0f, -1.0f,
5.0f, 6.5f, 3.0f, 2.0f, 5.0f, 6.5f, 4.0f, 2.0f, 5.0f, 6.5f, 5.0f, 4.0f, 5.0f, 6.5f,
1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f,
5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f,
1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f, 5.0f, 6.5f, 1.0f, -1.0f,
+
5.5f, 8.0f, 3.0f, 2.0f, 5.5f, 8.0f, 4.0f, 2.0f, 5.5f, 8.0f, 5.0f, 4.0f, 5.5f, 8.0f,
1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f,
5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f,
1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f,
5.5f, 8.0f, 3.0f, 2.0f, 5.5f, 8.0f, 4.0f, 2.0f, 5.5f, 8.0f, 5.0f, 4.0f, 5.5f, 8.0f,
1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f,
+
5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f,
1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f, 5.5f, 8.0f, 1.0f, -1.0f,
5.0f, 8.0f, 3.0f, 2.0f, 5.0f, 8.0f, 4.0f, 2.0f, 5.0f, 8.0f, 5.0f, 4.0f, 5.0f, 8.0f,
1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f,
5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f,
1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f, 5.0f, 8.0f, 1.0f, -1.0f
- };
+ } )
+ : std::vector<float>(
+ {
+ 3.0f, 2.5f, 1.0f, 2.0f, 3.0f, 2.5f, 2.0f, 2.0f, 3.0f, 2.5f, 3.0f, 4.0f, 3.0f, 2.5f,
+ 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f,
+ 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f,
+ 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f, 3.0f, 2.5f, 0.0f, 0.0f,
+ 3.0f, 3.5f, 2.0f, 3.0f, 3.0f, 3.5f, 3.0f, 3.0f, 3.0f, 3.5f, 4.0f, 5.0f, 3.0f, 3.5f,
+ 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f,
+
+ 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f,
+ 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f, 3.0f, 3.5f, 0.0f, 0.0f,
+ 5.0f, 4.5f, 2.0f, 3.0f, 5.0f, 4.5f, 3.0f, 3.0f, 5.0f, 4.5f, 4.0f, 5.0f, 5.0f, 4.5f,
+ 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f,
+ 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f,
+ 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f, 5.0f, 4.5f, 0.0f, 0.0f,
+
+ 5.5f, 6.0f, 2.0f, 3.0f, 5.5f, 6.0f, 3.0f, 3.0f, 5.5f, 6.0f, 4.0f, 5.0f, 5.5f, 6.0f,
+ 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f,
+ 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f,
+ 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f,
+ 5.5f, 6.0f, 2.0f, 3.0f, 5.5f, 6.0f, 3.0f, 3.0f, 5.5f, 6.0f, 4.0f, 5.0f, 5.5f, 6.0f,
+ 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f,
+
+ 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f,
+ 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f, 5.5f, 6.0f, 0.0f, 0.0f,
+ 5.0f, 6.0f, 2.0f, 3.0f, 5.0f, 6.0f, 3.0f, 3.0f, 5.0f, 6.0f, 4.0f, 5.0f, 5.0f, 6.0f,
+ 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f,
+ 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f,
+ 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f, 5.0f, 6.0f, 0.0f, 0.0f
+ } );
DepthwiseConvolution2dDescriptor descriptor;
descriptor.m_PadLeft = 0;
@@ -160,7 +197,7 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
descriptor.m_PadBottom = 1;
descriptor.m_StrideX = 1;
descriptor.m_StrideY = 1;
- descriptor.m_BiasEnabled = true;
+ descriptor.m_BiasEnabled = biasEnabled;
descriptor.m_DataLayout = dataLayout;
// Permute input if NCHW, the original input and output are in NHWC format.
@@ -174,10 +211,9 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
// Quantize data
std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+ std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
- std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
-
ConstTensor weights(weightsInfo, qWeightsData);
ConstTensor biases(biasesInfo, qBiasesData);
@@ -194,3 +230,5 @@ void DepthwiseConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
{ { 0, qExpectedOutputData } },
backends);
}
+
+} // anonymous namespace
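With m_BiasEnabled now driven by the new biasEnabled parameter, the same helper covers both the biased and bias-free paths. A hypothetical call for the no-bias case follows; the backend and data types are chosen for illustration only:

    // Sketch: exercise the new bias-free depthwise path on the reference backend.
    DepthwiseConvolution2dEndToEnd<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
        { armnn::Compute::CpuRef }, armnn::DataLayout::NHWC, /*biasEnabled=*/false);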
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index 9d05a64ce8..609fe438ff 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2021, 2023 Arm Ltd. All rights reserved.
+// Copyright © 2020-2021, 2023-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -78,6 +78,20 @@ void ElementwiseUnarySimpleEndToEnd(const std::vector<BackendId>& backends,
expectedOutput = { 1.f, 1.f, 1.f, 1.f, 0.447214f, 0.447214f, 0.447214f, 0.447214f,
0.57735f, 0.57735f, 0.57735f, 0.57735f, 0.5f, 0.5f, 0.5f, 0.5f };
break;
+ case UnaryOperation::Exp:
+ input = { 1, -1, 1, 1, 5, -5, 5, 5,
+ -3, 3, 3, 3, 4, 4, -4, 4 };
+ expectedOutput =
+ { 2.718282f, 0.367879f, 2.718282f, 2.718282f, 148.413162f, 0.006738f, 148.413162f, 148.413162f,
+ 0.049787f, 20.085537f, 20.085537f, 20.085537f, 54.598148f, 54.598148f, 0.018316f, 54.598148f };
+ break;
+ case UnaryOperation::Log:
+ input = { 1, 1, 1, 1, 5, 5, 5, 5,
+ 3, 3, 3, 3, 4, 4, 4, 4 };
+ expectedOutput =
+ { 0.000000f, 0.000000f, 0.000000f, 0.000000f, 1.609438f, 1.609438f, 1.609438f, 1.609438f,
+ 1.098612f, 1.098612f, 1.098612f, 1.098612f, 1.386294f, 1.386294f, 1.386294f, 1.386294f };
+ break;
default:
input = { 1, -1, 1, 1, 5, -5, 5, 5,
-3, 3, 3, 3, 4, 4, -4, 4 };
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index a65f3b4b98..a6b52e67d4 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -161,6 +161,128 @@ armnn::INetworkPtr CreateFullyConnectedNetworkNoConnectedBiasExplicit(const armn
return network;
}
+armnn::INetworkPtr CreateFullyConnectedNetworkConstantWeightsAndBias(const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::TensorInfo& weightsTensorInfo,
+ const armnn::ConstTensor& weightsConstantTensor,
+ const armnn::TensorInfo& biasTensorInfo,
+ const armnn::ConstTensor& biasConstantTensor,
+ armnn::FullyConnectedDescriptor descriptor)
+{
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "Input");
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weightsConstantTensor, "Weights");
+ armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
+ armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+ Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
+ Connect(weightsLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
+ Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
+
+ if (descriptor.m_BiasEnabled)
+ {
+ armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biasConstantTensor, "Bias");
+ Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
+ }
+
+ return network;
+}
+
+template<DataType ArmnnIType, DataType ArmnnWType = ArmnnIType, DataType ArmnnBType = ArmnnIType,
+ DataType ArmnnOType = ArmnnIType>
+void FullyConnectedConstantWeightsAndBiasEndToEnd(const std::vector<armnn::BackendId>& backends, const bool biasEnabled)
+{
+ using namespace armnn;
+
+ unsigned int inputWidth = 1;
+ unsigned int inputHeight = 1;
+ unsigned int inputChannels = 5;
+ unsigned int inputNum = 2;
+
+ unsigned int outputChannels = 3;
+ unsigned int outputNum = 2;
+
+ unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+ unsigned int outputShape[] = { outputNum, outputChannels };
+ unsigned int weightsShape[] = { outputChannels, inputChannels };
+ unsigned int biasShape[] = { outputChannels };
+
+ using IT = ResolveType<ArmnnIType>;
+ using WT = ResolveType<ArmnnWType>;
+ using BT = ResolveType<ArmnnBType>;
+ using OT = ResolveType<ArmnnOType>;
+
+ const float qScale = IsQuantizedType<IT>() ? 0.5f : 1.0f;
+ const int32_t qOffset = IsQuantizedType<IT>() ? 2 : 0;
+
+ armnn::TensorInfo inputTensorInfo(4, inputShape, ArmnnIType, qScale, qOffset, true);
+ armnn::TensorInfo weightsTensorInfo(2, weightsShape, ArmnnWType, qScale, qOffset, true);
+ armnn::TensorInfo biasTensorInfo(1, biasShape, ArmnnBType, qScale * qScale, 0, true);
+ armnn::TensorInfo outputTensorInfo(2, outputShape, ArmnnOType, qScale, qOffset);
+
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_ConstantWeights = true;
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_TransposeWeightMatrix = true;
+
+ std::vector<float> floatInputData =
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+ 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+ };
+
+ std::vector<float> floatWeightsData =
+ {
+ .5f, .5f, .5f, .5f, .5f,
+ 2.f, 2.f, 2.f, 2.f, 2.f,
+ .5f, 1.f, 2.f, 3.f, 4.f
+ };
+
+ std::vector<float> floatBiasData = biasEnabled ? std::vector<float>({10.f, 20.f, 30.f})
+ : std::vector<float>({0.f, 0.f, 0.f});
+
+ std::vector<float> floatOutputData =
+ {
+ 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + floatBiasData[0], // 7.5 or 17.5
+ 2.0f + 4.0f + 6.0f + 8.0f + 10.f + floatBiasData[1], // 30 or 50
+ 0.5f + 2.0f + 6.0f + 12.f + 20.f + floatBiasData[2], // 40.5 or 70.5
+
+ 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + floatBiasData[0], // 7.5 or 17.5
+ 10.0f + 8.0f + 6.0f + 4.0f + 2.f + floatBiasData[1], // 30 or 50
+ 2.5f + 4.0f + 6.0f + 6.f + 4.f + floatBiasData[2] // 22.5 or 52.5
+ };
+
+ std::vector<IT> inputData = armnnUtils::QuantizedVector<IT>(floatInputData, qScale, qOffset);
+ std::vector<WT> weightsData = armnnUtils::QuantizedVector<WT>(floatWeightsData, qScale, qOffset);
+ std::vector<BT> biasData = armnnUtils::QuantizedVector<BT>(floatBiasData, qScale * qScale);
+ std::vector<OT> expectedOutputData = armnnUtils::QuantizedVector<OT>(floatOutputData, qScale, qOffset);
+
+ ConstTensor weightsConstantTensor(weightsTensorInfo, weightsData.data());
+ ConstTensor biasConstantTensor(biasTensorInfo, biasData.data());
+
+ armnn::INetworkPtr network = CreateFullyConnectedNetworkConstantWeightsAndBias(inputTensorInfo,
+ outputTensorInfo,
+ weightsTensorInfo,
+ weightsConstantTensor,
+ biasTensorInfo,
+ biasConstantTensor,
+ descriptor);
+
+ CHECK(network);
+
+ std::map<int, std::vector<IT>> inputTensorData = {{ 0, inputData }};
+ std::map<int, std::vector<OT>> expectedOutputTensorData = {{ 0, expectedOutputData }};
+
+ const float tolerance = IsQuantizedType<IT>() ? 1.0f: 0.000001f;
+
+ EndToEndLayerTestImpl<ArmnnIType, ArmnnOType>(std::move(network),
+ inputTensorData,
+ expectedOutputTensorData,
+ backends,
+ tolerance);
+}
+
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId>& backends)
{
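The constant-weights-and-bias helper added above pairs with the QSymmS8 data type that WorkloadData.cpp now accepts for FullyConnected. A hypothetical invocation using quantized inputs and weights with a Signed32 bias (type and backend choices are illustrative, not taken from the patch):

    // Sketch: quantized FullyConnected with constant weights and bias on the reference backend.
    FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QSymmS8,
                                                 armnn::DataType::QSymmS8,
                                                 armnn::DataType::Signed32,
                                                 armnn::DataType::QSymmS8>({ armnn::Compute::CpuRef }, true);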
diff --git a/src/backends/backendsCommon/test/PadEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PadEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..1b0c2ac87c
--- /dev/null
+++ b/src/backends/backendsCommon/test/PadEndToEndTestImpl.hpp
@@ -0,0 +1,313 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <ResolveType.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <doctest/doctest.h>
+#include <CommonTestUtils.hpp>
+
+using namespace armnn;
+namespace
+{
+
+template<typename armnn::DataType DataType>
+armnn::INetworkPtr CreatePadNetwork(const armnn::TensorShape& inputXShape,
+ const armnn::TensorShape& outputShape,
+ const armnn::PadDescriptor padDesc,
+ const float qScale = 1.0f,
+ const int32_t qOffset = 0)
+{
+
+ INetworkPtr network(INetwork::Create());
+
+ TensorInfo inputTensorInfo(inputXShape, DataType, qScale, qOffset, true);
+
+ TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset, true);
+
+ IConnectableLayer* padLayer = network->AddPadLayer(padDesc, "pad");
+ IConnectableLayer* inputX = network->AddInputLayer(0, "input");
+ IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+ Connect(inputX, padLayer, inputTensorInfo, 0, 0);
+ Connect(padLayer, output, outputTensorInfo, 0, 0);
+
+ return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void PadEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 7, 7 };
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::PadDescriptor descriptor;
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ descriptor.m_PadList = padList;
+ descriptor.m_PadValue = 3;
+ armnn::WorkloadInfo info;
+
+ INetworkPtr network = CreatePadNetwork<ArmnnType>(inputShape,
+ outputShape,
+ descriptor,
+ qScale,
+ qOffset);
+ CHECK(network);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>({
+ // Height (3) x Width (3)
+ 4, 8, 6,
+ 7, 4, 4,
+ 3, 2, 4
+ }, qScale, qOffset);
+
+ float p = 3;
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>({
+ p, p, p, p, p, p, p,
+ p, p, p, p, p, p, p,
+ p, p, 4, 8, 6, p, p,
+ p, p, 7, 4, 4, p, p,
+ p, p, 3, 2, 4, p, p,
+ p, p, p, p, p, p, p,
+ p, p, p, p, p, p, p
+ }, qScale, qOffset);
+
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputValues } };
+ std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutputValues } };
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+ inputTensorData,
+ expectedOutputData,
+ backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void Pad4dEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+
+ const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
+ const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::PadDescriptor padDesc;
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+ padDesc.m_PadList = padList;
+ padDesc.m_PadValue = 0;
+ INetworkPtr network = CreatePadNetwork<ArmnnType>(inputShape,
+ outputShape,
+ padDesc,
+ qScale,
+ qOffset);
+ CHECK(network);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>({ // Batch 0, Channel 0, Height (3) x Width (2)
+ 0, 1,
+ 2, 3,
+ 4, 5,
+
+ // Batch 0, Channel 1, Height (3) x Width (2)
+ 6, 7,
+ 8, 9,
+ 10, 11,
+
+ // Batch 1, Channel 0, Height (3) x Width (2)
+ 12, 13,
+ 14, 15,
+ 16, 17,
+
+ // Batch 1, Channel 1, Height (3) x Width (2)
+ 18, 19,
+ 20, 21,
+ 22, 23
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>({ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 1, 0,
+ 0, 2, 3, 0,
+ 0, 4, 5, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 6, 7, 0,
+ 0, 8, 9, 0,
+ 0, 10, 11, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 12, 13, 0,
+ 0, 14, 15, 0,
+ 0, 16, 17, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 18, 19, 0,
+ 0, 20, 21, 0,
+ 0, 22, 23, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0
+ }, qScale, qOffset);
+
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputValues } };
+ std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutputValues } };
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+ inputTensorData,
+ expectedOutputData,
+ backends);
+}
+
+} // anonymous namespace
\ No newline at end of file
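Like the other helpers in this directory, PadEndToEnd and Pad4dEndToEnd are meant to be instantiated from the per-backend test files that change in this patch. A rough, hypothetical pair of calls for the reference backend (backend and data types are illustrative):

    PadEndToEnd<armnn::DataType::Float32>({ armnn::Compute::CpuRef });
    Pad4dEndToEnd<armnn::DataType::QAsymmS8>({ armnn::Compute::CpuRef });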
diff --git a/src/backends/backendsCommon/test/ReduceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ReduceEndToEndTestImpl.hpp
index 83c59f594f..9440bda5ad 100644
--- a/src/backends/backendsCommon/test/ReduceEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ReduceEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -40,29 +40,202 @@ armnn::INetworkPtr CreateReduceNetwork(const armnn::TensorShape& inputShape,
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-void ReduceEndToEnd(const std::vector<armnn::BackendId>& backends)
+void ReduceEndToEnd2d(const std::vector<armnn::BackendId>& backends,
+ ReduceOperation reduceOperation,
+ bool keepDims = false)
{
using namespace armnn;
- const TensorShape& inputShape = { 1, 1, 1, 5 };
- const TensorShape& outputShape = { 1, 1, 1 };
+ ReduceDescriptor descriptor;
+ descriptor.m_KeepDims = keepDims;
+ descriptor.m_vAxis = { 0 };
+ descriptor.m_ReduceOperation = reduceOperation;
+
+ TensorShape inputShape = { 2, 3 };
+ TensorShape outputShape = inputShape;
+
+ if (keepDims)
+ {
+ outputShape[descriptor.m_vAxis[0]] = 1;
+ }
+ else
+ {
+ outputShape = { 3 };
+ }
+
+ INetworkPtr network = CreateReduceNetwork<ArmnnType>(inputShape, outputShape, descriptor);
+
+ CHECK(network);
+
+ std::vector<float> floatInputData =
+ {
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f
+ };
+
+ std::vector<float> floatOutputData;
+
+ switch(reduceOperation)
+ {
+ case ReduceOperation::Sum:
+ floatOutputData =
+ {
+ 5.0f, 7.0f, 9.0f
+ };
+ break;
+ default:
+ throw armnn::Exception("ReduceEndToEnd2d: Reduce Operation not implemented.");
+ }
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData);
+ std::vector<T> outputData = armnnUtils::QuantizedVector<T>(floatOutputData);
+
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+ std::map<int, std::vector<T>> expectedOutputData = { { 0, outputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ReduceEndToEnd3d(const std::vector<armnn::BackendId>& backends,
+ ReduceOperation reduceOperation,
+ bool keepDims = false)
+{
+ using namespace armnn;
ReduceDescriptor descriptor;
- descriptor.m_KeepDims = false;
- descriptor.m_vAxis = { 3 };
- descriptor.m_ReduceOperation = ReduceOperation::Sum;
+ descriptor.m_KeepDims = keepDims;
+ descriptor.m_vAxis = { 1 };
+ descriptor.m_ReduceOperation = reduceOperation;
+
+ TensorShape inputShape = { 2, 2, 3 };
+ TensorShape outputShape = inputShape;
+
+ if (keepDims)
+ {
+ outputShape[descriptor.m_vAxis[0]] = 1;
+ }
+ else
+ {
+ outputShape = { 2, 3 };
+ }
INetworkPtr network = CreateReduceNetwork<ArmnnType>(inputShape, outputShape, descriptor);
CHECK(network);
- std::vector<float> floatInputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
- std::vector<float> floatOutputData({ 34.0f });
+ std::vector<float> floatInputData =
+ {
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+
+ 7.0f, 8.0f, 9.0f,
+ 10.0f, 11.0f, 12.0f
+ };
+
+ std::vector<float> floatOutputData;
+
+ switch(reduceOperation)
+ {
+ case ReduceOperation::Sum:
+ floatOutputData =
+ {
+ 5.0f, 7.0f, 9.0f,
+ 17.0f, 19.0f, 21.0f
+ };
+ break;
+ default:
+ throw armnn::Exception("ReduceEndToEnd3d: Reduce Operation not implemented.");
+ }
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData);
+ std::vector<T> outputData = armnnUtils::QuantizedVector<T>(floatOutputData);
+
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+ std::map<int, std::vector<T>> expectedOutputData = { { 0, outputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ReduceEndToEnd4d(const std::vector<armnn::BackendId>& backends,
+ ReduceOperation reduceOperation,
+ bool keepDims = false)
+{
+ using namespace armnn;
+
+ ReduceDescriptor descriptor;
+ descriptor.m_KeepDims = keepDims;
+ descriptor.m_vAxis = { 3 };
+ descriptor.m_ReduceOperation = reduceOperation;
+
+ TensorShape inputShape = { 1, 1, 1, 5 };
+ TensorShape outputShape = inputShape;
+
+ if (keepDims)
+ {
+ outputShape[descriptor.m_vAxis[0]] = 1;
+ }
+ else
+ {
+ outputShape = { 1, 1, 1 };
+ }
+
+ INetworkPtr network = CreateReduceNetwork<ArmnnType>(inputShape, outputShape, descriptor);
+
+ CHECK(network);
+
+ std::vector<float> floatInputData =
+ {
+ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f
+ };
+
+ std::vector<float> floatOutputData;
+
+ switch(reduceOperation)
+ {
+ case ReduceOperation::Sum:
+ floatOutputData = { 34.0f };
+ break;
+ default:
+ throw armnn::Exception("ReduceEndToEnd4d: Reduce Operation not implemented.");
+ }
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData);
+ std::vector<T> outputData = armnnUtils::QuantizedVector<T>(floatOutputData);
+
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+ std::map<int, std::vector<T>> expectedOutputData = { { 0, outputData } };
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ReduceEndToEndEmptyAxis(const std::vector<armnn::BackendId>& backends)
+{
+ using namespace armnn;
+
+ const TensorShape& inputShape = { 2, 1, 3 };
+ const TensorShape& outputShape = { 1 };
+
+ ReduceDescriptor descriptor;
+
+ INetworkPtr network = CreateReduceNetwork<ArmnnType>(inputShape, outputShape, descriptor);
+
+ CHECK(network);
+
+ std::vector<float> floatInputData =
+ {
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f
+ };
+
+ std::vector<float> floatOutputData = { 21.0f };
- std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData);
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData);
std::vector<T> outputData = armnnUtils::QuantizedVector<T>(floatOutputData);
- std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+ std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<T>> expectedOutputData = { { 0, outputData } };
EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
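The reworked Reduce helpers take the reduce operation and a keepDims flag as arguments instead of hard-coding Sum over a 4D input. A hypothetical call covering the 2D keep-dims case (parameters and backend chosen for illustration only):

    ReduceEndToEnd2d<armnn::DataType::Float32>({ armnn::Compute::CpuRef },
                                               armnn::ReduceOperation::Sum,
                                               /*keepDims=*/true);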
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 8b024e5a35..6ae540382a 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -422,6 +422,73 @@ LayerTestResult<T, 4> Pad4dTestCommon(
}
template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 5> Pad5dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue)
+{
+ IgnoreUnused(memoryManager);
+ const armnn::TensorShape inputShape{ 1, 1, 2, 1, 1 };
+ const armnn::TensorShape outputShape{ 2, 1, 4, 1, 2 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 3, 3
+ },
+ qScale, qOffset);
+
+ auto p = customPaddingValue;
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {p, p, 3, p, 3, p, p, p, p, p, p, p, p, p, p, p}
+ );
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::PadQueueDescriptor descriptor;
+
+ std::vector<std::pair<unsigned int, unsigned int>> PadList;
+ PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
+ PadList.push_back(std::pair<unsigned int, unsigned int>(0,0));
+ PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ PadList.push_back(std::pair<unsigned int, unsigned int>(0,0));
+ PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
+
+ descriptor.m_Parameters.m_PadList = PadList;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 5>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+
+template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> PadQAsymmTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -613,6 +680,14 @@ LayerTestResult<float, 4> PadFloat324dTest(
return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
+LayerTestResult<float, 5> PadFloat325dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return Pad5dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
index b62fc78ba3..c25ac5ce8e 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,6 +40,15 @@ LayerTestResult<T, 4> Pad4dTestCommon(
int32_t qOffset);
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> Pad5dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue = 0.0f);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> PadQAsymmTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -88,6 +97,11 @@ LayerTestResult<float, 4> PadFloat324dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+LayerTestResult<float, 5> PadFloat325dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index 10edd85a5b..97df42559e 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -860,6 +860,599 @@ LayerTestResult<T, 2> StridedSlice2dReverseTest(
inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 1, 2, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {2, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 1;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {2, 1, 2, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {2, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 2;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {2, 1, 1, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {2, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 4;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StridedSlice3dEllipsisMask1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {2, 1, 2};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_EllipsisMask = 1;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 2.0f, 3.0f,
+
+ 20.0f, 30.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 3>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StridedSlice3dEllipsisMask2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 3, 2};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_EllipsisMask = 2;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 2.0f, 3.0f, 6.0f, 7.0f, 10.0f, 11.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 3>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StridedSlice3dEllipsisMask4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 1, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_EllipsisMask = 4;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 3>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StridedSlice3dNewAxisMask1EllipsisMask1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {2, 1, 2};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 1;
+ desc.m_Parameters.m_EllipsisMask = 1;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 2.0f, 3.0f,
+
+ 20.0f, 30.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 3>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask1EllipsisMask2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 2, 3, 2};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 1;
+ desc.m_Parameters.m_EllipsisMask = 2;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 2.0f, 3.0f, 6.0f, 7.0f, 10.0f, 11.0f,
+
+ 20.0f, 30.0f, 60.0f, 70.0f, 100.0f, 110.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask1EllipsisMask4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 1, 3, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 1;
+ desc.m_Parameters.m_EllipsisMask = 4;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask2EllipsisMask1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {2, 3, 1, 2};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 2;
+ desc.m_Parameters.m_EllipsisMask = 1;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 2.0f, 3.0f, 6.0f, 7.0f, 10.0f, 11.0f,
+
+ 20.0f, 30.0f, 60.0f, 70.0f, 100.0f, 110.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StridedSlice3dNewAxisMask2EllipsisMask2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 3, 2};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 2;
+ desc.m_Parameters.m_EllipsisMask = 2;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 2.0f, 3.0f, 6.0f, 7.0f, 10.0f, 11.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 3>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask2EllipsisMask4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 1, 3, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 2;
+ desc.m_Parameters.m_EllipsisMask = 4;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask4EllipsisMask1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {2, 3, 1, 1};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 4;
+ desc.m_Parameters.m_EllipsisMask = 1;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 5.0f, 9.0f,
+
+ 10.0f, 50.0f, 90.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> StridedSlice3dNewAxisMask4EllipsisMask2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 3, 4, 1};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 4;
+ desc.m_Parameters.m_EllipsisMask = 2;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 4>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> StridedSlice3dNewAxisMask4EllipsisMask4Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = {2, 3, 4};
+ unsigned int outputShape[] = {1, 1, 4};
+
+ armnn::StridedSliceQueueDescriptor desc;
+ desc.m_Parameters.m_Begin = {0, 0, 1};
+ desc.m_Parameters.m_End = {1, 1, 3};
+ desc.m_Parameters.m_Stride = {1, 1, 1};
+ desc.m_Parameters.m_NewAxisMask = 4;
+ desc.m_Parameters.m_EllipsisMask = 4;
+
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f, 110.0f, 120.0f
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ });
+
+ return StridedSliceTestImpl<T, 3, 3>(
+ workloadFactory, memoryManager, tensorHandleFactory,
+ inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
+}
+
} // anonymous namespace
LayerTestResult<float, 4> StridedSlice4dFloat32Test(
@@ -1052,6 +1645,156 @@ LayerTestResult<float, 2> StridedSlice2dReverseFloat32Test(
tensorHandleFactory);
}
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask1Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask2Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask4Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 3> StridedSlice3dEllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask1Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 3> StridedSlice3dEllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask2Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 3> StridedSlice3dEllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask4Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 3> StridedSlice3dNewAxisMask1EllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask1EllipsisMask1Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask1EllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask1EllipsisMask2Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask1EllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask1EllipsisMask4Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask2EllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask2EllipsisMask1Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 3> StridedSlice3dNewAxisMask2EllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask2EllipsisMask2Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask2EllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask2EllipsisMask4Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask4EllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask4EllipsisMask1Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask4EllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask4EllipsisMask2Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 3> StridedSlice3dNewAxisMask4EllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask4EllipsisMask4Test<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
LayerTestResult<uint8_t, 4> StridedSlice4dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1224,6 +1967,66 @@ LayerTestResult<uint8_t, 2> StridedSlice2dReverseUint8Test(
return StridedSlice2dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}
+LayerTestResult<uint8_t, 4> StridedSlice3dNewAxisMask1Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask1Test<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> StridedSlice3dNewAxisMask2Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask2Test<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> StridedSlice3dNewAxisMask4Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask4Test<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 3> StridedSlice3dEllipsisMask1Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask1Test<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 3> StridedSlice3dEllipsisMask2Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask2Test<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 3> StridedSlice3dEllipsisMask4Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask4Test<armnn::DataType::QAsymmU8>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
LayerTestResult<int16_t, 4> StridedSlice4dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1301,3 +2104,63 @@ LayerTestResult<int16_t, 2> StridedSlice2dReverseInt16Test(
{
return StridedSlice2dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory);
}
+
+LayerTestResult<int16_t, 4> StridedSlice3dNewAxisMask1Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask1Test<armnn::DataType::QSymmS16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> StridedSlice3dNewAxisMask2Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask2Test<armnn::DataType::QSymmS16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> StridedSlice3dNewAxisMask4Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dNewAxisMask4Test<armnn::DataType::QSymmS16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 3> StridedSlice3dEllipsisMask1Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask1Test<armnn::DataType::QSymmS16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 3> StridedSlice3dEllipsisMask2Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask2Test<armnn::DataType::QSymmS16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 3> StridedSlice3dEllipsisMask4Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return StridedSlice3dEllipsisMask4Test<armnn::DataType::QSymmS16>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory);
+}
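
Aside (illustrative only, not part of the patch above): the NewAxisMask/EllipsisMask tests added in this file all follow one shape rule, and the standalone sketch below reproduces the output shapes they expect. The names (SliceSize, OutputShape) are invented for the example, and it assumes positive strides with no begin/end/shrink masks, which is all these tests use; it is not ArmNN's implementation.

#include <cstddef>
#include <cstdio>
#include <vector>

// Number of elements kept on one sliced axis, for a positive stride.
unsigned int SliceSize(int begin, int end, int stride)
{
    return static_cast<unsigned int>((end - begin + stride - 1) / stride);
}

// Derives the output shape from the begin/end/stride spec and the
// NewAxisMask / EllipsisMask bits, mirroring the expectations encoded
// in the StridedSlice3d* tests above.
std::vector<unsigned int> OutputShape(const std::vector<unsigned int>& input,
                                      const std::vector<int>& begin,
                                      const std::vector<int>& end,
                                      const std::vector<int>& stride,
                                      unsigned int newAxisMask,
                                      unsigned int ellipsisMask)
{
    std::vector<unsigned int> out;
    std::size_t inputDim = 0;    // next input dimension to consume
    for (std::size_t spec = 0; spec < begin.size(); ++spec)
    {
        if (ellipsisMask & (1u << spec))
        {
            // "...": take enough leading dimensions in full so the remaining spec
            // entries line up with the trailing input dimensions.
            std::size_t remainingSpecs = 0;
            for (std::size_t s = spec + 1; s < begin.size(); ++s)
            {
                if (!(newAxisMask & (1u << s))) { ++remainingSpecs; }
            }
            while (input.size() - inputDim > remainingSpecs)
            {
                out.push_back(input[inputDim++]);
            }
        }
        else if (newAxisMask & (1u << spec))
        {
            out.push_back(1);    // inserted length-1 axis, consumes no input dimension
        }
        else
        {
            out.push_back(SliceSize(begin[spec], end[spec], stride[spec]));
            ++inputDim;
        }
    }
    while (inputDim < input.size())
    {
        out.push_back(input[inputDim++]);    // unspecified trailing dims kept in full
    }
    return out;
}

int main()
{
    const std::vector<unsigned int> input {2, 3, 4};
    // NewAxisMask = 4, as in StridedSlice3dNewAxisMask4Test: prints 2 1 1 4.
    for (auto d : OutputShape(input, {0, 0, 1}, {2, 1, 3}, {1, 1, 1}, 4, 0))
    {
        std::printf("%u ", d);
    }
    std::printf("\n");
    // EllipsisMask = 1, as in StridedSlice3dEllipsisMask1Test: prints 2 1 2.
    for (auto d : OutputShape(input, {0, 0, 1}, {1, 1, 3}, {1, 1, 1}, 0, 1))
    {
        std::printf("%u ", d);
    }
    std::printf("\n");
    return 0;
}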
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp
index e53f439da5..13a2288f77 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -105,6 +105,81 @@ LayerTestResult<float, 2> StridedSlice2dReverseFloat32Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> StridedSlice3dEllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> StridedSlice3dEllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> StridedSlice3dEllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> StridedSlice3dNewAxisMask1EllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask1EllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask1EllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask2EllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> StridedSlice3dNewAxisMask2EllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask2EllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask4EllipsisMask1Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> StridedSlice3dNewAxisMask4EllipsisMask2Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> StridedSlice3dNewAxisMask4EllipsisMask4Float32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
LayerTestResult<uint8_t, 4> StridedSlice4dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -195,6 +270,36 @@ LayerTestResult<uint8_t, 2> StridedSlice2dReverseUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+LayerTestResult<uint8_t, 4> StridedSlice3dNewAxisMask1Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> StridedSlice3dNewAxisMask2Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> StridedSlice3dNewAxisMask4Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> StridedSlice3dEllipsisMask1Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> StridedSlice3dEllipsisMask2Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> StridedSlice3dEllipsisMask4Uint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
LayerTestResult<int16_t, 4> StridedSlice4dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -239,3 +344,33 @@ LayerTestResult<int16_t, 2> StridedSlice2dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> StridedSlice3dNewAxisMask1Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> StridedSlice3dNewAxisMask2Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> StridedSlice3dNewAxisMask4Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 3> StridedSlice3dEllipsisMask1Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 3> StridedSlice3dEllipsisMask2Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 3> StridedSlice3dEllipsisMask4Int16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index fa5d545547..783893012a 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -497,14 +497,125 @@ TEST_CASE("ClPreluEndToEndTestUint8")
}
// Reduce
-TEST_CASE("ClReduceEndToEndTest")
+// Reduce Sum
+TEST_CASE("ClReduceSumSum2dEndtoEndTestSigned32")
{
- ReduceEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
+ ReduceEndToEnd2d<DataType::Signed32>(clDefaultBackends, ReduceOperation::Sum);
}
-TEST_CASE("ClReduceEndToEndTestFloat16")
+TEST_CASE("ClReduceSumSum2dEndtoEndTestSigned32WithKeepDims")
{
- ReduceEndToEnd<armnn::DataType::Float16>(clDefaultBackends);
+ ReduceEndToEnd2d<DataType::Signed32>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum2dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd2d<DataType::Float16>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum2dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float16>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum2dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd2d<DataType::Float32>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum2dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float32>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum2dEndtoEndTestInt8")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum2dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd3d<DataType::Float16>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float16>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd3d<DataType::Float32>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float32>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestInt8")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum3dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd4d<DataType::Float16>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float16>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd4d<DataType::Float32>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float32>(clDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestInt8")
+{
+ ReduceEndToEnd4d<DataType::QAsymmS8>(clDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("ClReduceSumSum4dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::QAsymmS8>(clDefaultBackends, ReduceOperation::Sum, true);
}
// Reshape
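
Aside (illustrative only, not part of the patch above): the WithKeepDims variants exercise the keepDims flag that the new ReduceEndToEnd2d/3d/4d helpers presumably forward to ReduceDescriptor::m_KeepDims. The plain C++ sketch below shows the only difference it makes for a Sum reduction: the summed values are identical, only the output rank changes.

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // A {2, 3} tensor reduced with Sum along axis 1.
    const std::vector<float> data {1, 2, 3, 4, 5, 6};
    std::vector<float> sums(2, 0.0f);
    for (std::size_t row = 0; row < 2; ++row)
    {
        for (std::size_t col = 0; col < 3; ++col)
        {
            sums[row] += data[row * 3 + col];
        }
    }
    // keepDims == false -> output shape {2}; keepDims == true -> output shape {2, 1}.
    // Either way the data is the same: 6 and 15.
    std::printf("%.0f %.0f\n", sums[0], sums[1]);
    return 0;
}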
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 6634ac6673..d19b9f59c3 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -752,14 +752,125 @@ TEST_CASE("NeonArgMinAxis3TestQuantisedAsymm8")
}
// Reduce
-TEST_CASE("NeonReduceEndToEndTest")
+// Reduce Sum
+TEST_CASE("NeonReduceSum2dEndtoEndTestSigned32")
{
- ReduceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
+ ReduceEndToEnd2d<DataType::Signed32>(neonDefaultBackends, ReduceOperation::Sum);
}
-TEST_CASE("NeonReduceEndToEndTestFloat16")
+TEST_CASE("NeonReduceSum2dEndtoEndTestSigned32WithKeepDims")
{
- ReduceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends);
+ ReduceEndToEnd2d<DataType::Signed32>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum2dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd2d<DataType::Float16>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum2dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float16>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum2dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd2d<DataType::Float32>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum2dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float32>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum2dEndtoEndTestInt8")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum2dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd3d<DataType::Float16>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float16>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd3d<DataType::Float32>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float32>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestInt8")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum3dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd4d<DataType::Float16>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float16>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd4d<DataType::Float32>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float32>(neonDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestInt8")
+{
+ ReduceEndToEnd4d<DataType::QAsymmS8>(neonDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("NeonReduceSum4dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::QAsymmS8>(neonDefaultBackends, ReduceOperation::Sum, true);
}
// Reshape
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 3e04a19df4..7b7d1563bc 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1547,6 +1547,7 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
DataType::Float16,
DataType::QAsymmS8,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 6f57236dd5..67f805cf5e 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -28,6 +28,7 @@
#include <backendsCommon/test/GatherNdEndToEndTestImpl.hpp>
#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
#include <backendsCommon/test/LogSoftmaxEndToEndTestImpl.hpp>
+#include <backendsCommon/test/PadEndToEndTestImpl.hpp>
#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
#include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
#include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
@@ -93,6 +94,56 @@ TEST_CASE("RefRsqrtEndToEndTestInt16")
UnaryOperation::Rsqrt);
}
+// Exp
+TEST_CASE("RefExpEndToEndTestFloat32")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+ UnaryOperation::Exp);
+}
+
+TEST_CASE("RefExpEndToEndTestUint8")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+ UnaryOperation::Exp);
+}
+
+TEST_CASE("RefExpEndToEndTestInt8")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends,
+ UnaryOperation::Exp);
+}
+
+TEST_CASE("RefExpEndToEndTestInt16")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QSymmS16>(defaultBackends,
+ UnaryOperation::Exp);
+}
+
+// Log
+TEST_CASE("RefLogEndToEndTestFloat32")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+ UnaryOperation::Log);
+}
+
+TEST_CASE("RefLogEndToEndTestUint8")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+ UnaryOperation::Log);
+}
+
+TEST_CASE("RefLogEndToEndTestSint8")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends,
+ UnaryOperation::Log);
+}
+
+TEST_CASE("RefLogEndToEndTestInt16")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QSymmS16>(defaultBackends,
+ UnaryOperation::Log);
+}
+
// Addition
TEST_CASE("RefAdditionEndtoEndFloat32")
{
@@ -501,6 +552,46 @@ TEST_CASE("RefBatchMatMulEndToEndInt8Test")
BatchMatMulEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
}
+TEST_CASE("RefBatchMatMulNoTransposeEndToEndFloat32Test")
+{
+ BatchMatMulNoTransposeEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMulNoTransposeEndToEndInt8Test")
+{
+ BatchMatMulNoTransposeEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMulSimple4DEndToEndFloat32Test")
+{
+ BatchMatMulSimple4DEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMulSimple4DEndToEndInt8Test")
+{
+ BatchMatMulSimple4DEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMul4DEndToEndFloat32Test")
+{
+ BatchMatMul4DEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMul4DEndToEndInt8Test")
+{
+ BatchMatMul4DEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMulNotSquareEndToEndFloat32Test")
+{
+ BatchMatMulNotSquareEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefBatchMatMulNotSquareEndToEndInt8Test")
+{
+ BatchMatMulNotSquareEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NHWCTest")
{
BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
@@ -669,6 +760,13 @@ TEST_CASE("RefDepthwiseConvolution2dEndtoEndFloat32Test")
armnn::DataLayout::NHWC);
}
+TEST_CASE("RefDepthwiseConvolution2dEndtoEndFloat32TestBiasDisabled")
+{
+ DepthwiseConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NHWC,
+ false);
+}
+
TEST_CASE("RefFillEndToEndTest")
{
FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
@@ -684,8 +782,67 @@ TEST_CASE("RefFillEndToEndTestInt32")
FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
}
+// Fully Connected
TEST_CASE("RefFullyConnectedEndToEndTestFloat32")
{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::Float32>(defaultBackends, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestNoBiasFloat32")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::Float32>(defaultBackends, false);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestInt8")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QAsymmS8,
+ armnn::DataType::QAsymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmS8>(defaultBackends, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestNoBiasInt8")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QAsymmS8,
+ armnn::DataType::QAsymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmS8>(defaultBackends, false);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestInt8Symm")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QSymmS8,
+ armnn::DataType::QSymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QSymmS8>(defaultBackends, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestNoBiasInt8Symm")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QSymmS8,
+ armnn::DataType::QSymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QSymmS8>(defaultBackends, false);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestUint8")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QAsymmU8,
+ armnn::DataType::QAsymmU8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmU8>(defaultBackends, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestNoBiasUint8")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QAsymmU8,
+ armnn::DataType::QAsymmU8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmU8>(defaultBackends, false);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestNoBiasOtherFloat32")
+{
FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(defaultBackends);
}
@@ -724,6 +881,7 @@ TEST_CASE("RefFullyConnectedEndToEndTestBiasDisabledConnectBias")
FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, false, false, true, true);
}
+// Gather
TEST_CASE("RefGatherFloatTest")
{
GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
@@ -1095,6 +1253,32 @@ TEST_CASE("RefReLuEndToEndTestQSymmS16")
ActivationEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends, ActivationFunction::ReLu);
}
+// GeLu
+TEST_CASE("RefGeluEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(defaultBackends, ActivationFunction::Gelu);
+}
+
+TEST_CASE("RefGeluEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(defaultBackends, ActivationFunction::Gelu);
+}
+
+TEST_CASE("RefGeluEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends, ActivationFunction::Gelu);
+}
+
+TEST_CASE("RefGeluEndToEndTestQAsymmU8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends, ActivationFunction::Gelu);
+}
+
+TEST_CASE("RefGeluEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends, ActivationFunction::Gelu);
+}
+
// BoundedReLu
TEST_CASE("RefBoundedReLuEndToEndTestFloat32")
{
@@ -1322,6 +1506,27 @@ TEST_CASE("RefMaxPool2DThreeLayerEndtoEndTestFloat32")
MaxPool2dThreeLayerEndToEnd<DataType::Float32>(defaultBackends);
}
+// Pad
+TEST_CASE("RefPadEndToEndFloat32Test")
+{
+ PadEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefPadEndToEndInt8Test")
+{
+ PadEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+TEST_CASE("RefPad4dEndToEndFloat32Test")
+{
+ Pad4dEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+TEST_CASE("RefPad4dEndToEndInt8Test")
+{
+ Pad4dEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
// Quantization
TEST_CASE("QuantizationEndToEndFloat32_U8Test")
{
@@ -1920,14 +2125,125 @@ TEST_CASE("RefRankEndToEndTestQSymmS8")
}
// Reduce
-TEST_CASE("RefReduceEndToEndTest")
+// Reduce Sum
+TEST_CASE("RefReduceSum2dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd2d<DataType::Signed32>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Signed32>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd2d<DataType::Float16>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float16>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd2d<DataType::Float32>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float32>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestInt8")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum2dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd3d<DataType::Float16>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float16>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd3d<DataType::Float32>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float32>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestInt8")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum3dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd4d<DataType::Float16>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float16>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd4d<DataType::Float32>(defaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float32>(defaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("RefReduceSum4dEndtoEndTestInt8")
{
- ReduceEndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ReduceEndToEnd4d<DataType::QAsymmS8>(defaultBackends, ReduceOperation::Sum);
}
-TEST_CASE("RefReduceEndToEndTestFloat16")
+TEST_CASE("RefReduceSum4dEndtoEndTestInt8WithKeepDims")
{
- ReduceEndToEnd<armnn::DataType::Float16>(defaultBackends);
+ ReduceEndToEnd4d<DataType::QAsymmS8>(defaultBackends, ReduceOperation::Sum, true);
}
// Reshape
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 078338163f..eef70a9b10 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1693,6 +1693,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat323d, PadFloat323dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat324d, PadFloat324dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat325d, PadFloat325dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82d, PadUint82dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
@@ -2239,6 +2240,30 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dFloat32, StridedSlice3dFloat32Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseFloat32, StridedSlice3dReverseFloat32Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dFloat32, StridedSlice2dFloat32Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseFloat32, StridedSlice2dReverseFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask1Float32, StridedSlice3dNewAxisMask1Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask2Float32, StridedSlice3dNewAxisMask2Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask4Float32, StridedSlice3dNewAxisMask4Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask1Float32, StridedSlice3dEllipsisMask1Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask2Float32, StridedSlice3dEllipsisMask2Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask4Float32, StridedSlice3dEllipsisMask4Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask1EllipsisMask1Float32, StridedSlice3dNewAxisMask1EllipsisMask1Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask1EllipsisMask2Float32, StridedSlice3dNewAxisMask1EllipsisMask2Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask1EllipsisMask4Float32, StridedSlice3dNewAxisMask1EllipsisMask4Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask2EllipsisMask1Float32, StridedSlice3dNewAxisMask2EllipsisMask1Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask2EllipsisMask2Float32, StridedSlice3dNewAxisMask2EllipsisMask2Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask2EllipsisMask4Float32, StridedSlice3dNewAxisMask2EllipsisMask4Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask4EllipsisMask1Float32, StridedSlice3dNewAxisMask4EllipsisMask1Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask4EllipsisMask2Float32, StridedSlice3dNewAxisMask4EllipsisMask2Float32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(
+ StridedSlice3dNewAxisMask4EllipsisMask4Float32, StridedSlice3dNewAxisMask4EllipsisMask4Float32Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dUint8, StridedSlice4dUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dReverseUint8, StridedSlice4dReverseUint8Test)
@@ -2267,6 +2292,12 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dUint8, StridedSlice3dUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseUint8, StridedSlice3dReverseUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dUint8, StridedSlice2dUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseUint8, StridedSlice2dReverseUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask1Uint8, StridedSlice3dNewAxisMask1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask2Uint8, StridedSlice3dNewAxisMask2Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask4Uint8, StridedSlice3dNewAxisMask4Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask1Uint8, StridedSlice3dEllipsisMask1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask2Uint8, StridedSlice3dEllipsisMask2Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask4Uint8, StridedSlice3dEllipsisMask4Uint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dInt16, StridedSlice4dInt16Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dReverseInt16, StridedSlice4dReverseInt16Test)
@@ -2277,6 +2308,12 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dInt16, StridedSlice3dInt16Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseInt16, StridedSlice3dReverseInt16Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dInt16, StridedSlice2dInt16Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseInt16, StridedSlice2dReverseInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask1Int16, StridedSlice3dNewAxisMask1Int16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask2Int16, StridedSlice3dNewAxisMask2Int16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dNewAxisMask4Int16, StridedSlice3dNewAxisMask4Int16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask1Int16, StridedSlice3dEllipsisMask1Int16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask2Int16, StridedSlice3dEllipsisMask2Int16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dEllipsisMask4Int16, StridedSlice3dEllipsisMask4Int16Test)
// Debug
ARMNN_AUTO_TEST_CASE(Debug4dFloat32, Debug4dFloat32Test, /*toFile*/ false)
diff --git a/src/backends/reference/workloads/Broadcast.cpp b/src/backends/reference/workloads/Broadcast.cpp
index 24af0fc4b1..f17ec6b311 100644
--- a/src/backends/reference/workloads/Broadcast.cpp
+++ b/src/backends/reference/workloads/Broadcast.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,13 +38,31 @@ BroadcastLoop::BroadcastLoop(const TensorShape& inShape, const TensorShape& outS
unsigned int sIn = 1;
unsigned int sOut = 1;
+ // Get the difference between the output dimension and input dimension
+ const unsigned int dimDifference = numDims - inShape.GetNumDimensions();
+
for (unsigned int j = numDims - 1, k = 0; k < numDims ; k++, j--)
{
+
m_DimData[j].m_DimSize = outShape[j];
- m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0;
+ // Pretend there are extra 1-dimensional tensors prepended
+ if (dimDifference > 0 && j < dimDifference)
+ {
+ m_DimData[j].m_Stride1 = 0;
+ sIn *= 1;
+ }
+ else if (dimDifference > 0)
+ {
+ m_DimData[j].m_Stride1 = (inShape[j - dimDifference] > 1) ? sIn : 0;
+ sIn *= inShape[j - dimDifference];
+ }
+ else
+ {
+ m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0;
+ sIn *= inShape[j];
+ }
m_DimData[j].m_StrideOut = sOut;
- sIn *= inShape[j];
sOut *= outShape[j];
}
}
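
A standalone sketch of the stride computation above, using assumed shapes (a rank-1 input {3} broadcast against a rank-2 output {2,3}) rather than the ArmNN types: the input is treated as if 1-sized dimensions were prepended until it matches the output rank, so those prepended dimensions get a stride of 0 and the same input elements are re-read for every step of the output.

#include <cstdio>
#include <vector>

int main()
{
    std::vector<unsigned int> inShape  = {3};       // rank-1 input
    std::vector<unsigned int> outShape = {2, 3};    // rank-2 output
    const unsigned int numDims = static_cast<unsigned int>(outShape.size());
    const unsigned int dimDifference = numDims - static_cast<unsigned int>(inShape.size());

    std::vector<unsigned int> strideIn(numDims);
    std::vector<unsigned int> strideOut(numDims);
    unsigned int sIn = 1;
    unsigned int sOut = 1;
    for (unsigned int j = numDims - 1, k = 0; k < numDims; ++k, --j)
    {
        if (j < dimDifference)
        {
            strideIn[j] = 0;    // pretend 1-sized dimension: the input never advances here
        }
        else
        {
            strideIn[j] = (inShape[j - dimDifference] > 1) ? sIn : 0;
            sIn *= inShape[j - dimDifference];
        }
        strideOut[j] = sOut;
        sOut *= outShape[j];
    }
    // Expected result: strideIn = {0, 1}, strideOut = {3, 1}, i.e. both output rows
    // re-read the same three input elements, which is the broadcast being added here.
    printf("strideIn = {%u, %u}, strideOut = {%u, %u}\n",
           strideIn[0], strideIn[1], strideOut[0], strideOut[1]);
    return 0;
}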
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index 4044f06ac4..1d1ca5a856 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -67,6 +67,7 @@ template struct armnn::ElementwiseBinaryFunction<std::plus<float>>;
template struct armnn::ElementwiseBinaryFunction<std::minus<float>>;
template struct armnn::ElementwiseBinaryFunction<std::multiplies<float>>;
template struct armnn::ElementwiseBinaryFunction<std::divides<float>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::floorDiv<float>>;
template struct armnn::ElementwiseBinaryFunction<armnn::maximum<float>>;
template struct armnn::ElementwiseBinaryFunction<armnn::minimum<float>>;
template struct armnn::ElementwiseBinaryFunction<armnn::power<float>>;
@@ -76,6 +77,7 @@ template struct armnn::ElementwiseBinaryFunction<std::plus<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<std::minus<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<std::multiplies<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<std::divides<int32_t>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::floorDiv<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<armnn::maximum<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<armnn::minimum<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<armnn::power<int32_t>>;
diff --git a/src/backends/reference/workloads/Maximum.hpp b/src/backends/reference/workloads/Maximum.hpp
index ca4b480b51..1e1f02d68a 100644
--- a/src/backends/reference/workloads/Maximum.hpp
+++ b/src/backends/reference/workloads/Maximum.hpp
@@ -1,24 +1,36 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <iostream>
+
namespace armnn
{
template<typename T>
struct maximum
+{
+ typedef T result_type;
+ typedef T first_argument_type;
+
+ T operator () (const T& inputData0, const T& inputData1) const
+ {
+ return std::max(inputData0, inputData1);
+ }
+};
+
+template<typename T>
+struct floorDiv
{
typedef T result_type;
typedef T first_argument_type;
- T
- operator () (const T& inputData0, const T& inputData1) const
+ T operator () (const T& inputData0, const T& inputData1) const
{
- return std::max(inputData0, inputData1);
+ double result = static_cast<double>(inputData0)/static_cast<double>(inputData1);
+ return static_cast<T>(std::floor(result));
}
};
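
A minimal usage sketch of the new floorDiv functor, with a local copy of its definition so the snippet stands alone: unlike plain C++ integer division, which truncates toward zero, floorDiv rounds toward negative infinity.

#include <cassert>
#include <cmath>
#include <cstdint>

// Local copy for illustration only; the real definition lives in Maximum.hpp above.
template<typename T>
struct floorDiv
{
    T operator()(const T& a, const T& b) const
    {
        double result = static_cast<double>(a) / static_cast<double>(b);
        return static_cast<T>(std::floor(result));
    }
};

int main()
{
    floorDiv<int32_t> fd;
    assert(fd(7, 2)  == 3);
    assert(fd(-7, 2) == -4);   // plain C++ "-7 / 2" would truncate to -3
    return 0;
}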
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index f58dbaea61..8273d34365 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -63,7 +63,9 @@ void Pad(const TensorInfo& inputInfo,
unsigned int inputChannels = 0;
unsigned int inputHeight = 0;
unsigned int inputWidth = 0;
+ unsigned int inputDim5 = 0;
+ unsigned int outputBatches = 0;
unsigned int outputChannels = 0;
unsigned int outputHeight = 0;
unsigned int outputWidth = 0;
@@ -76,6 +78,7 @@ void Pad(const TensorInfo& inputInfo,
{
// For Quantized types Pad Value should not be quantized with scale and offset of the tensor info
auto temporaryInfo = TensorInfo(outputInfo.GetShape(), outputInfo.GetDataType(), 1.0f, 0);
+
auto outputData = MakeEncoder<float>(temporaryInfo, outputHandle->Map());
FillOutputWithPadValue(*outputData, padValue, numOutputElements);
}
@@ -95,13 +98,13 @@ void Pad(const TensorInfo& inputInfo,
{
input[w];
auto inputValue = input.Get();
- auto outputIndex = w + std::get<0>(padList[0]);
+ auto outputIndex = w + padList[0].first;
output[outputIndex];
output.Set(inputValue);
}
break;
- case 2 :
+ case 2:
inputHeight = inputShape[0];
inputWidth = inputShape[1];
outputWidth = outputShape[1];
@@ -112,14 +115,14 @@ void Pad(const TensorInfo& inputInfo,
{
input[h * inputWidth + w];
auto inputValue = input.Get();
- auto outputIndex = (h + std::get<0>(padList[0])) * outputWidth + (w + std::get<0>(padList[1]));
+ auto outputIndex = (h + padList[0].first) * outputWidth + (w + padList[1].first);
output[outputIndex];
output.Set(inputValue);
}
}
break;
- case 3 :
+ case 3:
inputChannels = inputShape[0];
inputHeight = inputShape[1];
inputWidth = inputShape[2];
@@ -134,9 +137,9 @@ void Pad(const TensorInfo& inputInfo,
{
input[c * inputHeight * inputWidth + h * inputWidth + w];
auto inputValue = input.Get();
- auto outputIndex = (c + std::get<0>(padList[0])) * outputHeight * outputWidth
- + (h + std::get<0>(padList[1])) * outputWidth
- + (w + std::get<0>(padList[2]));
+ auto outputIndex = (c + padList[0].first) * outputHeight * outputWidth
+ + (h + padList[1].first) * outputWidth
+ + (w + padList[2].first);
output[outputIndex];
output.Set(inputValue);
}
@@ -144,7 +147,7 @@ void Pad(const TensorInfo& inputInfo,
}
break;
- case 4 :
+ case 4:
inputBatches = inputShape[0];
inputChannels = inputShape[1];
inputHeight = inputShape[2];
@@ -162,24 +165,69 @@ void Pad(const TensorInfo& inputInfo,
for (unsigned int w = 0; w < inputWidth ; w++)
{
input[b * inputChannels * inputHeight * inputWidth
- + c * inputHeight * inputWidth
- + h * inputWidth
- + w];
+ + c * inputHeight * inputWidth
+ + h * inputWidth
+ + w];
auto inputValue = input.Get();
- auto outputIndex = (b + std::get<0>(padList[0]))
+ auto outputIndex = (b + padList[0].first)
* outputChannels * outputHeight * outputWidth
- + (c + std::get<0>(padList[1])) * outputHeight * outputWidth
- + (h + std::get<0>(padList[2])) * outputWidth
- + (w + std::get<0>(padList[3]));
+ + (c + padList[1].first) * outputHeight * outputWidth
+ + (h + padList[2].first) * outputWidth
+ + (w + padList[3].first);
output[outputIndex];
output.Set(inputValue);
}
}
}
}
+ break;
+ case 5:
+ inputBatches = inputShape[0];
+ inputChannels = inputShape[1];
+ inputHeight = inputShape[2];
+ inputWidth = inputShape[3];
+ inputDim5 = inputShape[4];
+
+ outputBatches = outputShape[1];
+ outputChannels = outputShape[2];
+ outputHeight = outputShape[3];
+ outputWidth = outputShape[4];
+
+ for (unsigned int b = 0; b < inputBatches; ++b)
+ {
+ for (unsigned int c = 0; c < inputChannels; ++c)
+ {
+ for (unsigned int h = 0; h < inputHeight; ++h)
+ {
+ for (unsigned int w = 0; w < inputWidth ; ++w)
+ {
+ for (unsigned int d = 0; d < inputDim5 ; ++d)
+ {
+ input[b * inputChannels * inputHeight * inputWidth * inputDim5
+ + c * inputHeight * inputWidth * inputDim5
+                            + h * inputWidth * inputDim5
+                            + w * inputDim5
+                            + d];
+
+ auto inputValue = input.Get();
+
+ auto outputIndex = (b + padList[0].first)
+ * outputBatches * outputChannels * outputHeight * outputWidth
+ + (c + padList[1].first) * outputChannels * outputHeight*outputWidth
+ + (h + padList[2].first) * outputHeight * outputWidth
+ + (w + padList[3].first) * outputWidth
+ + (d + padList[4].first);
+
+ output[outputIndex];
+ output.Set(inputValue);
+ }
+ }
+ }
+ }
+ }
break;
- default :
+
+ default:
break;
}
}
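
A worked example of the 5D index arithmetic above, with assumed shapes and pad amounts: for a row-major layout the stride of each output dimension is the product of the trailing output dimensions, which is exactly what the outputBatches/outputChannels/outputHeight/outputWidth products compute after the front padding has been added to each coordinate.

#include <cstdio>
#include <utility>
#include <vector>

int main()
{
    std::vector<unsigned int> outShape = {3, 4, 5, 6, 7};     // padded output shape (assumed)
    std::vector<std::pair<unsigned int, unsigned int>> padList =
        {{1, 0}, {1, 1}, {0, 2}, {2, 0}, {1, 1}};             // {front, back} padding per dim

    // Input element (b, c, h, w, d) lands at the padded coordinates below.
    unsigned int b = 0, c = 1, h = 2, w = 3, d = 4;
    unsigned int outputIndex =
          (b + padList[0].first) * outShape[1] * outShape[2] * outShape[3] * outShape[4]
        + (c + padList[1].first) * outShape[2] * outShape[3] * outShape[4]
        + (h + padList[2].first) * outShape[3] * outShape[4]
        + (w + padList[3].first) * outShape[4]
        + (d + padList[4].first);

    printf("outputIndex = %u\n", outputIndex);   // 1*840 + 2*210 + 2*42 + 5*7 + 5 = 1384
    return 0;
}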
diff --git a/src/backends/reference/workloads/RefCastWorkload.cpp b/src/backends/reference/workloads/RefCastWorkload.cpp
index 40fbce6f4e..c8484d9672 100644
--- a/src/backends/reference/workloads/RefCastWorkload.cpp
+++ b/src/backends/reference/workloads/RefCastWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -12,17 +12,54 @@
namespace
{
- void Cast(armnn::Decoder<float>& in, armnn::Encoder<float>& out, const uint32_t numElements )
+ void Cast(armnn::Decoder<float>& in, armnn::Encoder<float>& out,
+ const uint32_t numElements, const armnn::DataType OutputDataType)
{
- for (unsigned int i = 0; i < numElements; i++)
+ for (unsigned int i = 0; i < numElements; ++i)
+ {
+ switch (OutputDataType)
+ {
+ case armnn::DataType::Float32:
+ case armnn::DataType::Float16:
+ case armnn::DataType::BFloat16:
+ out.Set(in.Get());
+ break;
+ default:
+ out.Set(std::floor(in.Get()));
+ break;
+ }
+ ++in;
+ ++out;
+ }
+ }
+
+
+ // Cast Float to Int64
+ void Cast(armnn::Decoder<float>& in, armnn::Encoder<double_t>& out,
+ const uint32_t numElements, const armnn::DataType)
+ {
+ for (unsigned int i = 0; i < numElements; ++i)
{
out.Set(in.Get());
++in;
++out;
}
}
+
+ // Cast Int64 To Float
+ void Cast(armnn::Decoder<double_t>& in, armnn::Encoder<float>& out,
+ const uint32_t numElements, const armnn::DataType)
+ {
+ for (unsigned int i = 0; i < numElements; ++i)
+ {
+ out.Set(static_cast<float>(in.Get()));
+ ++in;
+ ++out;
+ }
+ }
}
+
namespace armnn
{
@@ -56,9 +93,27 @@ void RefCastWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<IT
outputTensorInfo.SetQuantizationOffset(0);
}
- Cast(*MakeDecoder<float>(inputTensorInfo, inputs[0]->Map()),
- *MakeEncoder<float>(outputTensorInfo, outputs[0]->Map()),
- inputTensorInfo.GetNumElements());
+ if(inputTensorInfo.GetDataType() == DataType::Signed64)
+ {
+ Cast(*MakeDecoder<double_t>(inputTensorInfo, inputs[0]->Map()),
+ *MakeEncoder<float>(outputTensorInfo, outputs[0]->Map()),
+ inputTensorInfo.GetNumElements(),
+ outputTensorInfo.GetDataType());
+ }
+ else if(outputTensorInfo.GetDataType() == DataType::Signed64)
+ {
+ Cast(*MakeDecoder<float>(inputTensorInfo, inputs[0]->Map()),
+ *MakeEncoder<double_t>(outputTensorInfo, outputs[0]->Map()),
+ inputTensorInfo.GetNumElements(),
+ outputTensorInfo.GetDataType());
+ }
+ else
+ {
+ Cast(*MakeDecoder<float>(inputTensorInfo, inputs[0]->Map()),
+ *MakeEncoder<float>(outputTensorInfo, outputs[0]->Map()),
+ inputTensorInfo.GetNumElements(),
+ outputTensorInfo.GetDataType());
+ }
}
} //namespace armnn \ No newline at end of file
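
The integer branch of the new Cast() helper floors before writing, so float-to-integer casts round toward negative infinity rather than toward zero, while Signed64 data is carried through the double_t decoder/encoder overloads. A tiny check of the rounding difference, independent of the workload API:

#include <cassert>
#include <cmath>
#include <cstdint>

int main()
{
    float value = -1.5f;
    assert(static_cast<int32_t>(std::floor(value)) == -2);   // floored path for integer outputs
    assert(static_cast<int32_t>(value) == -1);               // plain truncation, for comparison
    return 0;
}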
diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
index 2f30dff211..0cefe0f20d 100644
--- a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,8 @@ namespace armnn
template<typename DataType>
void ExecuteFunction(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs,
- BinaryOperation operation)
+ BinaryOperation operation,
+ const std::string& layerName = "")
{
const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
@@ -42,6 +43,7 @@ void ExecuteFunction(std::vector<ITensorHandle*> inputs,
using AddFunction = ElementwiseBinaryFunction<std::plus<DataType>>;
using DivFunction = ElementwiseBinaryFunction<std::divides<DataType>>;
+ using FloorDivFunction = ElementwiseBinaryFunction<armnn::floorDiv<DataType>>;
using MaximumFunction = ElementwiseBinaryFunction<armnn::maximum<DataType>>;
using MinimumFunction = ElementwiseBinaryFunction<armnn::minimum<DataType>>;
using MulFunction = ElementwiseBinaryFunction<std::multiplies<DataType>>;
@@ -49,6 +51,7 @@ void ExecuteFunction(std::vector<ITensorHandle*> inputs,
using SqDiffFunction = ElementwiseBinaryFunction<armnn::squaredDifference<DataType>>;
using PowerFunction = ElementwiseBinaryFunction<armnn::power<DataType>>;
+
switch (operation)
{
case BinaryOperation::Add:
@@ -58,7 +61,14 @@ void ExecuteFunction(std::vector<ITensorHandle*> inputs,
}
case BinaryOperation::Div:
{
- DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+ if(!layerName.empty() && layerName.find("FloorDiv") != std::string::npos)
+ {
+ FloorDivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+ }
+ else
+ {
+ DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+ }
break;
}
case BinaryOperation::Maximum:
@@ -123,11 +133,11 @@ void RefElementwiseBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs,
if (GetTensorInfo(inputs[0]).GetDataType() == DataType::Signed32)
{
- ExecuteFunction<int32_t>(inputs, outputs, m_Data.m_Parameters.m_Operation);
+ ExecuteFunction<int32_t>(inputs, outputs, m_Data.m_Parameters.m_Operation, m_Name);
}
else
{
- ExecuteFunction<float>(inputs, outputs, m_Data.m_Parameters.m_Operation);
+ ExecuteFunction<float>(inputs, outputs, m_Data.m_Parameters.m_Operation, m_Name);
}
}
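
A hedged, standalone sketch of the name-based dispatch above: BinaryOperation::Div is reused for floor division and the layer name acts as the discriminator, so a layer whose name contains "FloorDiv" takes the flooring path. The example layer names below are made up for illustration.

#include <cassert>
#include <cmath>
#include <string>

float Divide(float a, float b, const std::string& layerName)
{
    if (!layerName.empty() && layerName.find("FloorDiv") != std::string::npos)
    {
        return std::floor(a / b);   // FloorDivFunction path
    }
    return a / b;                   // DivFunction path
}

int main()
{
    assert(Divide(-7.0f, 2.0f, "FloorDiv_layer") == -4.0f);
    assert(Divide(-7.0f, 2.0f, "Div_layer") == -3.5f);
    return 0;
}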
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index 534a063ed5..1232e9f373 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ void Slice(const TensorInfo& inputInfo,
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int numDims = inputShape.GetNumDimensions();
- constexpr unsigned int maxNumDims = 4;
+ constexpr unsigned int maxNumDims = 5;
if (descriptor.m_Begin.size() != numDims)
{
std::stringstream msg;
@@ -43,9 +43,9 @@ void Slice(const TensorInfo& inputInfo,
throw InvalidArgumentException(msg.str());
}
- std::vector<unsigned int> paddedInput(4);
- std::vector<unsigned int> paddedBegin(4);
- std::vector<unsigned int> paddedSize (4);
+ std::vector<unsigned int> paddedInput(5);
+ std::vector<unsigned int> paddedBegin(5);
+ std::vector<unsigned int> paddedSize (5);
const unsigned int numPaddingDims = maxNumDims - numDims;
for (unsigned int i = 0u; i < maxNumDims; ++i)
@@ -69,16 +69,19 @@ void Slice(const TensorInfo& inputInfo,
unsigned int dim1 = paddedInput[1];
unsigned int dim2 = paddedInput[2];
unsigned int dim3 = paddedInput[3];
+ unsigned int dim4 = paddedInput[4];
unsigned int begin0 = paddedBegin[0];
unsigned int begin1 = paddedBegin[1];
unsigned int begin2 = paddedBegin[2];
unsigned int begin3 = paddedBegin[3];
+ unsigned int begin4 = paddedBegin[4];
unsigned int size0 = paddedSize[0];
unsigned int size1 = paddedSize[1];
unsigned int size2 = paddedSize[2];
unsigned int size3 = paddedSize[3];
+ unsigned int size4 = paddedSize[4];
if (begin0 + size0 > dim0)
{
@@ -129,11 +132,14 @@ void Slice(const TensorInfo& inputInfo,
{
for (unsigned int idx3 = begin3; idx3 < begin3 + size3; ++idx3)
{
- const unsigned int inputOffset =
- (((idx0 * dim1 + idx1) * dim2 + idx2) * dim3 + idx3) * dataTypeSize;
-
- ::memcpy(output, input + inputOffset, dataTypeSize);
- output += dataTypeSize;
+ for (unsigned int idx4 = begin4; idx4 < begin4 + size4; ++idx4)
+ {
+ const unsigned int inputOffset =
+ ((((idx0 * dim1 + idx1) * dim2 + idx2) * dim3 + idx3) * dim4 + idx4) * dataTypeSize;
+
+ ::memcpy(output, input + inputOffset, dataTypeSize);
+ output += dataTypeSize;
+ }
}
}
}
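
A sketch of the padding step with assumed values, assuming the unchanged loop body fills the leading entries with size 1, begin 0 and copy-size 1: a rank-3 slice is left-padded to the new maximum of five dimensions so the copy loops can always nest five deep.

#include <cstdio>
#include <vector>

int main()
{
    const unsigned int maxNumDims = 5;
    std::vector<unsigned int> inputShape = {4, 8, 16};   // rank-3 tensor (assumed)
    std::vector<unsigned int> begin      = {1, 2, 3};
    std::vector<unsigned int> size       = {2, 4, 8};

    std::vector<unsigned int> paddedInput(maxNumDims);
    std::vector<unsigned int> paddedBegin(maxNumDims);
    std::vector<unsigned int> paddedSize(maxNumDims);
    const unsigned int numPaddingDims = maxNumDims - static_cast<unsigned int>(inputShape.size());
    for (unsigned int i = 0; i < maxNumDims; ++i)
    {
        if (i < numPaddingDims)
        {
            paddedInput[i] = 1;   // leading dimensions become size 1 ...
            paddedBegin[i] = 0;   // ... starting at index 0 ...
            paddedSize[i]  = 1;   // ... copying a single slice
        }
        else
        {
            paddedInput[i] = inputShape[i - numPaddingDims];
            paddedBegin[i] = begin[i - numPaddingDims];
            paddedSize[i]  = size[i - numPaddingDims];
        }
    }
    printf("paddedInput = {%u, %u, %u, %u, %u}\n",   // expected {1, 1, 4, 8, 16}
           paddedInput[0], paddedInput[1], paddedInput[2], paddedInput[3], paddedInput[4]);
    return 0;
}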
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index fcd1c357f8..a8828fdfbe 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -108,34 +108,105 @@ void StridedSlice(const TensorInfo& inputInfo,
// Pad parameters to 4 dimensions
PadParams(paddedParams, 4);
- const int start0 = paddedParams.GetStartForAxis(inputShape, 0);
- const int stop0 = paddedParams.GetStopForAxis (inputShape, 0, start0);
+ // Arrays containing the start and stop index for each axis (adjusted by set params/flags)
+ int startArray [4] = {0};
+ int stopArray [4] = {0};
- const int start1 = paddedParams.GetStartForAxis(inputShape, 1);
- const int stop1 = paddedParams.GetStopForAxis (inputShape, 1, start1);
+ // Getting paddedParams stop and start values for each axis
+ for(unsigned int i = 0; i < 4; ++i)
+ {
+ startArray[i] = paddedParams.GetStartForAxis(inputShape, i);
+ stopArray[i] = paddedParams.GetStopForAxis(inputShape, i, startArray[i]);
+ }
- const int start2 = paddedParams.GetStartForAxis(inputShape, 2);
- const int stop2 = paddedParams.GetStopForAxis (inputShape, 2, start2);
+ // Adjusting the EllipsisMask based on the NewAxisMask
+ // (if NewAxisMask extends an axis, the ellipsis flag is extended as well)
+ if(paddedParams.m_NewAxisMask > 0 && paddedParams.m_EllipsisMask > 0)
+ {
+ // Iterate until the current EllipsisMask 1-bit found
+ for(unsigned int i = 0; i < 4; ++i)
+ {
+ // If EllipsisMask bit found, adjust based on NewAxisMask and exit loop
+ if(paddedParams.m_EllipsisMask & (1 << i) && !(paddedParams.m_NewAxisMask & (1 << i)))
+ {
+ // If the previous bit is the NewAxisMask, set the EllipsisMask there
+ // (this condition was determined based on the unit tests expected data)
+ if(paddedParams.m_NewAxisMask & (1 << (i-1)))
+ {
+ paddedParams.m_EllipsisMask |= (1 << (i-1));
+ }
+ // Otherwise, extend the EllipsisMask by one bit
+ else
+ {
+ paddedParams.m_EllipsisMask |= (1 << (i+1));
+ }
+ break;
+ }
+ }
+ }
- const int start3 = paddedParams.GetStartForAxis(inputShape, 3);
- const int stop3 = paddedParams.GetStopForAxis (inputShape, 3, start3);
+ // Processing start and stop values based on the EllipsisMask and NewAxisMask
+ for(unsigned int i = 0, dimIdx = 0; i < 4; ++i)
+ {
+ // If the EllipsisMask is set, extend the start/stop to the input dimension size
+ if(paddedParams.m_EllipsisMask & (1 << dimIdx))
+ {
+ startArray[i] = 0;
+ stopArray[i] = armnn::numeric_cast<int>(inputShape[i]);
+ }
+ // Otherwise, if the NewAxisMask is set, shift all following start/stop values to the left
+ else if(paddedParams.m_NewAxisMask & (1 << dimIdx))
+ {
+ // Increment dimIdx - skip the current dimension for which NewAxisMask is set
+ ++dimIdx;
+ }
+
+ // If the index of the currently processed dimension is higher than
+ // the index of the current start/stop array position, shift start/stop values
+ if(dimIdx > i && !(paddedParams.m_EllipsisMask & (1 << dimIdx)))
+ {
+ if(dimIdx < 4)
+ {
+ startArray[i] = startArray[dimIdx];
+ stopArray[i] = stopArray[dimIdx];
+ }
+ else
+ {
+ // If dimIdx is greater than the amount of available dimensions,
+ // instead of shifting the next ones, create new start/stop values
+ if(paddedParams.m_EllipsisMask > 0)
+ {
+ // The new values are 0,1 if there is an EllipsisMask bit present
+ startArray[i] = 0;
+ stopArray[i] = 1;
+ }
+ else
+ {
+ // Otherwise, select the entire inputTensor dimension size
+ startArray[i] = 0;
+ stopArray[i] = armnn::numeric_cast<int>(inputShape[i]);
+ }
+ }
+ }
+ ++dimIdx;
+ }
const int step = armnn::numeric_cast<int>(dataTypeSize);
- for (int in0 = start0;
- !LoopCondition(in0, stop0, paddedParams.m_Stride[0]);
+ for (int in0 = startArray[0];
+ !LoopCondition(in0, stopArray[0], paddedParams.m_Stride[0]);
in0 += paddedParams.m_Stride[0])
{
- for (int in1 = start1;
- !LoopCondition(in1, stop1, paddedParams.m_Stride[1]);
+ for (int in1 = startArray[1];
+ !LoopCondition(in1, stopArray[1], paddedParams.m_Stride[1]);
in1 += paddedParams.m_Stride[1])
{
- for (int in2 = start2;
- !LoopCondition(in2, stop2, paddedParams.m_Stride[2]);
+ for (int in2 = startArray[2];
+ !LoopCondition(in2, stopArray[2], paddedParams.m_Stride[2]);
in2 += paddedParams.m_Stride[2])
{
- for (int in3 = start3;
- !LoopCondition(in3, stop3, paddedParams.m_Stride[3]);
+ for (int in3 = startArray[3];
+ !LoopCondition(in3, stopArray[3], paddedParams.m_Stride[3]);
in3 += paddedParams.m_Stride[3])
{
int dim1 = armnn::numeric_cast<int>(inputShape[1]);
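
An illustration-only sketch of how the mask bits above are read (the mask values are made up): bit i of m_NewAxisMask or m_EllipsisMask refers to axis i of the padded 4D parameters; a new-axis bit makes the loop skip that position and pull the following real dimension's start/stop forward, while an ellipsis bit resets the range to the full dimension size.

#include <cstdio>

int main()
{
    int inputShape[4] = {1, 2, 3, 4};   // padded 4D input shape (assumed)
    int newAxisMask   = 0x2;            // axis 1 is an inserted axis
    int ellipsisMask  = 0x4;            // axis 2 is covered by the ellipsis

    for (int i = 0; i < 4; ++i)
    {
        bool isNewAxis  = (newAxisMask  & (1 << i)) != 0;
        bool isEllipsis = (ellipsisMask & (1 << i)) != 0;
        if (isEllipsis)
        {
            // Ellipsis: take the whole axis, start 0 and stop = dimension size.
            printf("axis %d: ellipsis, range [0, %d)\n", i, inputShape[i]);
        }
        else if (isNewAxis)
        {
            // New axis: skipped here; the following real axis' start/stop shift forward.
            printf("axis %d: new axis (skipped)\n", i);
        }
        else
        {
            // Otherwise the range would come from GetStartForAxis/GetStopForAxis as above.
            printf("axis %d: regular axis of size %d\n", i, inputShape[i]);
        }
    }
    return 0;
}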
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 8608776471..25b092f587 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -26,17 +26,25 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
case LayerType::Activation:
{
auto activationDesc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
- if (activationDesc->m_Function == ActivationFunction::LeakyReLu)
+ switch (activationDesc->m_Function)
{
- return ConvertLeakyReluToTosaOperator(layer, inputs, outputs, activationDesc);
- }
- if (activationDesc->m_Function == ActivationFunction::ReLu)
- {
- return ConvertReluToTosaOperator(layer, inputs, outputs, activationDesc);
- }
- else
- {
- return CreateEmptyTosaSerializationBasicBlock();
+ case ActivationFunction::LeakyReLu:
+ {
+ return ConvertLeakyReluToTosaOperator(layer, inputs, outputs, activationDesc);
+ }
+ case ActivationFunction::ReLu:
+ case ActivationFunction::BoundedReLu:
+ {
+ return ConvertReluToTosaOperator(layer, inputs, outputs, activationDesc);
+ }
+ case ActivationFunction::Gelu:
+ {
+ return ConvertGeluToTosaOperator(layer, inputs, outputs, activationDesc);
+ }
+ default:
+ {
+ return CreateEmptyTosaSerializationBasicBlock();
+ }
}
}
case LayerType::Addition:
@@ -53,7 +61,30 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
case LayerType::ElementwiseUnary:
{
auto unaryDesc = PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor);
- return ConvertElementwiseUnaryOperator(layer, inputs, outputs, unaryDesc);
+ switch(unaryDesc->m_Operation)
+ {
+ case UnaryOperation::Rsqrt:
+ {
+ return ConvertRsqrtOperator(layer, inputs, outputs, unaryDesc);
+ }
+ case UnaryOperation::Exp:
+ {
+ return ConvertExpOperator(layer, inputs, outputs, unaryDesc);
+ }
+ case UnaryOperation::Log:
+ {
+ return ConvertLogOperator(layer, inputs, outputs, unaryDesc);
+ }
+ default:
+ {
+ return CreateEmptyTosaSerializationBasicBlock();
+ }
+ }
+ }
+ case LayerType::BatchMatMul:
+ {
+ auto batchMatMulDesc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
+ return ConvertBatchMatMulToTosaOperator(layer, inputs, outputs, batchMatMulDesc);
}
case LayerType::Concat:
{
@@ -86,6 +117,16 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
auto conv2dDesc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
return ConvertDepthwiseConv2dToTosaOperator(layer, inputs, outputs, conv2dDesc);
}
+ case LayerType::FullyConnected:
+ {
+ auto fullyConnectedDesc = PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor);
+ return ConvertFullyConnectedToTosaOperator(layer, inputs, outputs, fullyConnectedDesc);
+ }
+ case LayerType::Pad:
+ {
+ auto padDesc = PolymorphicDowncast<const PadDescriptor*>(&descriptor);
+ return ConvertPadToTosaOperator(layer, inputs, outputs, padDesc);
+ }
case LayerType::Pooling2d:
{
auto poolDesc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
@@ -111,6 +152,11 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
{
return ConvertQuantizeToTosaOperator(layer, inputs, outputs);
}
+ case LayerType::Reduce:
+ {
+ auto reduceDesc = PolymorphicDowncast<const ReduceDescriptor*>(&descriptor);
+ return ConvertReduceToTosaOperator(layer, inputs, outputs, reduceDesc);
+ }
case LayerType::Reshape:
{
auto reshapeDesc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.cpp b/src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.cpp
new file mode 100644
index 0000000000..35a00302a0
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.cpp
@@ -0,0 +1,262 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "BatchMatMulOperator.hpp"
+#include "TosaRescaleOperatorUtils.hpp"
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLBatchMatMulOp
+TosaSerializationBasicBlock* ConvertBatchMatMulToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const BatchMatMulDescriptor* descriptor)
+{
+ if (descriptor->m_AdjointX || descriptor->m_AdjointY )
+ {
+ throw Exception("Support for adjoint not implemented.");
+ }
+ if (descriptor->m_DataLayoutX != armnn::DataLayout::NCHW || descriptor->m_DataLayoutY != armnn::DataLayout::NCHW )
+ {
+ throw Exception("MatMul only supported in the last 2 dimensions");
+ }
+
+ std::string input0Name = std::string("input_0");
+ std::string input1Name = std::string("input_1");
+ std::string outputName = std::string("output_0");
+ std::string outputReshape0Name = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ std::string outputReshape1Name = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ std::string outputTranspose0Name = std::string("intermediate1_") + GetUniqueTosaMappingID();
+ std::string outputTranspose1Name = std::string("intermediate1_") + GetUniqueTosaMappingID();
+
+ std::string blockName = std::string("Op_BATCHMATMUL_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if(layer != nullptr)
+ {
+ // Get the layer connected to the input slot and determine unique tensor names.
+ input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+ input1Name = GenerateUniqueInputName(layer->GetInputSlot(1));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ // Assumes both input types are same data type
+ DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
+ bool isInputInt8 = (inputDType == DType_INT8);
+ bool isInputInt16 = (inputDType == DType_INT16);
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Only add input tensors if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+ if(input0Name.find("input_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType, {}));
+ }
+ if(input1Name.find("input_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+ tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType, {}));
+ }
+
+ std::string input0TransposeName = input0Name;
+ std::string input1TransposeName = input1Name;
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+
+ std::string input0MatMulName = input0Name;
+ std::string input1MatMulName = input1Name;
+
+ // *** ADD OP STEPS ***
+
+ // ADD a RESHAPE OPs if BATCH DIMS > 1
+ // RESHAPE input 1
+ std::vector<int32_t> targetShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ std::vector<int32_t> transpose0Shape = GetTosaTensorShape(inputs[0]->GetShape());
+ uint32_t input0Dimensions = inputs[0]->GetNumDimensions();
+ if (input0Dimensions > 3)
+ {
+ uint32_t x = 1;
+ for (uint32_t i = 0; i < (input0Dimensions - 2); ++i)
+ {
+ x *=(inputs[0]->GetShape()[i]);
+ }
+
+ targetShape0 = {static_cast<int32_t>(x),
+ static_cast<int32_t>(inputs[0]->GetShape()[input0Dimensions - 2]),
+ static_cast<int32_t>(inputs[0]->GetShape()[input0Dimensions - 1])};
+
+ TosaReshapeAttribute attribute(targetShape0);
+
+ auto* input0ReshapeOp = new TosaSerializationOperator(Op_RESHAPE,
+ Attribute_ReshapeAttribute,
+ &attribute,
+ {input0Name},
+ {outputReshape0Name});
+
+ operators.push_back(input0ReshapeOp);
+ transpose0Shape = targetShape0;
+ tensors.push_back(new TosaSerializationTensor(outputReshape0Name, targetShape0, inputDType, {}));
+ input0TransposeName = outputReshape0Name;
+ input0MatMulName = outputReshape0Name;
+ }
+
+ // RESHAPE input 2
+ std::vector<int32_t> targetShape1 = GetTosaTensorShape(outputs[0]->GetShape());
+ std::vector<int32_t> transpose1Shape = GetTosaTensorShape(inputs[1]->GetShape());
+ uint32_t input1Dimensions = inputs[1]->GetNumDimensions();
+ if (input1Dimensions > 3)
+ {
+ uint32_t x = 1;
+ for (uint32_t i = 0; i < (input1Dimensions - 2); i++)
+ {
+ x *= (inputs[1]->GetShape()[i]);
+ }
+
+ targetShape1 = {static_cast<int32_t>(x),
+ static_cast<int32_t>(inputs[1]->GetShape()[input1Dimensions - 2]),
+ static_cast<int32_t>(inputs[1]->GetShape()[input1Dimensions - 1])};
+
+ TosaReshapeAttribute attribute(targetShape1);
+
+ auto* input1ReshapeOp = new TosaSerializationOperator(Op_RESHAPE,
+ Attribute_ReshapeAttribute,
+ &attribute,
+ {input1Name},
+ {outputReshape1Name});
+
+ operators.push_back(input1ReshapeOp);
+ transpose1Shape = targetShape1;
+ tensors.push_back(new TosaSerializationTensor(outputReshape1Name, targetShape1, inputDType, {}));
+ input1TransposeName = outputReshape1Name;
+ input1MatMulName = outputReshape1Name;
+ }
+ bool needsReshape = input0Dimensions > 3 || input1Dimensions > 3;
+
+ // ADD a TRANSPOSE OP for one/both inputs if transpose set to true
+ if (descriptor->m_TransposeX)
+ {
+ auto permuteVec = BatchMatMulDescriptor::GetPermuteVec(descriptor->m_DataLayoutX,
+ inputs[0]->GetShape());
+
+ std::vector<int32_t> mappings(permuteVec.begin(),
+ permuteVec.end());
+ TosaTransposeAttribute transposeAttribute(mappings);
+
+ TosaSerializationOperator *transposeOp = new TosaSerializationOperator(Op_TRANSPOSE,
+ Attribute_TransposeAttribute,
+ &transposeAttribute,
+ {input0TransposeName},
+ {outputTranspose0Name});
+ operators.push_back(transposeOp);
+ tensors.push_back(new TosaSerializationTensor(outputTranspose0Name, transpose0Shape, inputDType, {}));
+ input0MatMulName = outputTranspose0Name;
+ }
+
+ if (descriptor->m_TransposeY)
+ {
+ auto permuteVec = BatchMatMulDescriptor::GetPermuteVec(descriptor->m_DataLayoutY,
+ inputs[1]->GetShape());
+
+
+ std::vector<int32_t> mappings(permuteVec.begin(),
+ permuteVec.end());
+ TosaTransposeAttribute transposeAttribute(mappings);
+
+ TosaSerializationOperator *transposeOp = new TosaSerializationOperator(Op_TRANSPOSE,
+ Attribute_TransposeAttribute,
+ &transposeAttribute,
+ {input1TransposeName},
+ {outputTranspose1Name});
+ operators.push_back(transposeOp);
+ tensors.push_back(new TosaSerializationTensor(outputTranspose1Name, transpose1Shape, inputDType, {}));
+ input1MatMulName = outputTranspose1Name;
+ }
+
+ // ADD MAT MUL layer
+ std::string matMulOutputStr = needsReshape || isInputInt8 || isInputInt16 ?
+ std::string("intermediate2_") + GetUniqueTosaMappingID() : outputName;
+
+ TosaMatMulAttribute matMulAttribute(0,0); // input0_zp, input1_zp
+ DType matMulOutDType = ArmNNToDType(inputs[1]->GetDataType());
+ if (isInputInt8)
+ {
+ matMulAttribute = TosaMatMulAttribute(inputs[0]->GetQuantizationOffset(), inputs[1]->GetQuantizationOffset());
+ matMulOutDType = DType_INT32;
+ }
+ if (isInputInt16)
+ {
+ matMulAttribute = TosaMatMulAttribute(inputs[0]->GetQuantizationOffset(), inputs[1]->GetQuantizationOffset());
+ matMulOutDType = DType_INT48;
+ }
+ TosaSerializationOperator* matMulOp = new TosaSerializationOperator(Op_MATMUL,
+ Attribute_MatMulAttribute,
+ &matMulAttribute,
+ {input0MatMulName, input1MatMulName},
+ {matMulOutputStr});
+
+ operators.push_back(matMulOp);
+ tensors.push_back(new TosaSerializationTensor(matMulOutputStr, targetShape0, matMulOutDType, {}));
+
+ std::string outputRescale = needsReshape ?
+ std::string("intermediate3_") + GetUniqueTosaMappingID() : outputName;
+ std::string inputReshape2Name = isInputInt8 || isInputInt16 ? outputRescale : matMulOutputStr;
+
+ // ADD Rescale layer if it is int8
+ if (isInputInt8 || isInputInt16)
+ {
+ bool scale32 = isInputInt16 ? false : true;
+ bool doubleRound = isInputInt16 ? false : true;
+
+ double scale_alpha = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+
+ TosaSerializationOperator* rescaleOp = nullptr;
+ CreateRescaleTosaOperator(matMulOutputStr,
+ outputRescale,
+ scale_alpha,
+ input_zp,
+ output_zp,
+ doubleRound,
+ scale32,
+ &rescaleOp);
+
+ tensors.push_back(new TosaSerializationTensor(outputRescale,
+ targetShape0,
+ inputDType, {}));
+
+ operators.push_back(rescaleOp);
+ }
+
+ // ADD a RESHAPE back to expected rank
+ if (needsReshape)
+ {
+ const std::vector<int32_t>& targetShape = GetTosaTensorShape(TensorShape(outputs[0]->GetShape()));
+ TosaReshapeAttribute attribute(targetShape);
+
+ auto* outputReshapeOp = new TosaSerializationOperator(Op_RESHAPE,
+ Attribute_ReshapeAttribute,
+ &attribute,
+ {inputReshape2Name},
+ {outputName});
+
+ operators.push_back(outputReshapeOp);
+ tensors.push_back(new TosaSerializationTensor(outputName, targetShape, inputDType, {}));
+ }
+
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ {operators}, // operators
+ tensors, // tensors
+ {input0Name, input1Name}, // inputs
+ {outputName}); // outputs
+}
+
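
A shape-only sketch of the RESHAPE step above, using assumed shapes: because TOSA MATMUL takes rank-3 operands of the form [N, H, W], any input of rank greater than 3 has all of its leading batch dimensions folded into a single one before the matrix multiply, and the block reshapes back to the expected output rank afterwards.

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int32_t> CollapseBatchDims(const std::vector<int32_t>& shape)
{
    int32_t batch = 1;
    for (size_t i = 0; i + 2 < shape.size(); ++i)
    {
        batch *= shape[i];   // fold every leading (batch) dimension together
    }
    return {batch, shape[shape.size() - 2], shape[shape.size() - 1]};
}

int main()
{
    std::vector<int32_t> input0 = {2, 3, 4, 5};   // e.g. [2,3,4,5] x [2,3,5,6] -> [2,3,4,6]
    std::vector<int32_t> target = CollapseBatchDims(input0);
    printf("reshape target = [%d, %d, %d]\n", target[0], target[1], target[2]);   // [6, 4, 5]
    return 0;
}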
diff --git a/src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.hpp b/src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.hpp
new file mode 100644
index 0000000000..0efd76f6f3
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/BatchMatMulOperator.hpp
@@ -0,0 +1,17 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertBatchMatMulToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const BatchMatMulDescriptor*
+ descriptor = nullptr); \ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index bd86958de1..40091a7896 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -8,8 +8,12 @@ list(APPEND armnnTosaBackendOperators_sources
LeakyReluOperator.cpp
ReluOperator.hpp
ReluOperator.cpp
+ GeluOperator.hpp
+ GeluOperator.cpp
AvgPool2DIgnoreValueOperator.hpp
AvgPool2DIgnoreValueOperator.cpp
+ BatchMatMulOperator.hpp
+ BatchMatMulOperator.cpp
ConcatOperator.hpp
ConcatOperator.cpp
ConstantOperator.hpp
@@ -20,12 +24,22 @@ list(APPEND armnnTosaBackendOperators_sources
DepthwiseConv2dOperator.cpp
ElementwiseBinaryOperator.hpp
ElementwiseBinaryOperator.cpp
- ElementwiseUnaryOperator.cpp
- ElementwiseUnaryOperator.hpp
+ FullyConnectedOperator.cpp
+ FullyConnectedOperator.hpp
+ RsqrtOperator.cpp
+ RsqrtOperator.hpp
+ ExpOperator.cpp
+ ExpOperator.hpp
+ LogOperator.cpp
+ LogOperator.hpp
+ PadOperator.cpp
+ PadOperator.hpp
Pooling2DOperator.hpp
Pooling2DOperator.cpp
QuantizeOperator.hpp
QuantizeOperator.cpp
+ ReduceOperator.hpp
+ ReduceOperator.cpp
ReshapeOperator.hpp
ReshapeOperator.cpp
ResizeOperator.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.hpp b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.hpp
deleted file mode 100644
index 635abd6f3c..0000000000
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.hpp
+++ /dev/null
@@ -1,16 +0,0 @@
-//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TosaOperatorUtils.hpp"
-
-using namespace armnn;
-using namespace tosa;
-
-TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ElementwiseUnaryDescriptor* unaryDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ExpOperator.cpp b/src/backends/tosaCommon/operatorMappings/ExpOperator.cpp
new file mode 100644
index 0000000000..72b309789a
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ExpOperator.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ExpOperator.hpp"
+#include "TosaTableUtils.hpp"
+
+TosaSerializationBasicBlock* ConvertExpOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ElementwiseUnaryDescriptor* unaryDescriptor)
+{
+ if (unaryDescriptor->m_Operation != UnaryOperation::Exp)
+ {
+ throw armnn::Exception("ConvertExpOperator: Unsupported elementwise unary operation in descriptor.");
+ }
+
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_EXP_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if(layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ float input_scale = inputs[0]->GetQuantizationScale();
+ float output_scale = outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+ DataType inputDType = inputs[0]->GetDataType();
+ if (inputDType == DataType::QAsymmS8 ||
+ inputDType == DataType::QSymmS8)
+ {
+ auto exp_func = [](float x) -> float { return std::exp(x); };
+ TosaTableAttribute attribute(
+ getTosaConst8bitTable(input_scale, input_zp, output_scale, output_zp, exp_func));
+ operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
+ Attribute_TableAttribute,
+ &attribute,
+ {inputName},
+ {outputName}));
+ }
+ else if (inputDType == DataType::QSymmS16)
+ {
+ throw Exception("ConvertExpOperator() unsupported int 16 not implemented yet.");
+ // The following generates the table, tosa attribute and operator for int16 exponential.
+ // However, running the int16 EXP EndToEnd test causes incorrect output values.
+ // At the time of writing the EXP operator there is no requirment for int16 support.
+ // Points to enable int16 in the future:
+ // - TOSA specifies EXP int16 input must have int32 output
+ // - We potentially need a rescale after the int32 EXP output to convert back to int16.
+ /*
+ auto exp_func = [](float x) -> float { return std::exp(x); };
+ TosaTableAttribute attribute(
+ getTosaConst16bitTable<float>(input_scale, input_zp, output_scale, output_zp, exp_func));
+ operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
+ Attribute_TableAttribute,
+ &attribute,
+ {inputName},
+ {outputName}));
+ */
+ }
+ else if (inputDType == DataType::Signed32 ||
+ inputDType == DataType::Signed64)
+ {
+ throw Exception(
+ "ConvertExpOperator() unsupported int 32. Only int 8 and int 16 quantized types are supported.");
+ }
+ // Floating point EXP operator
+ else
+ {
+ operators.push_back(new TosaSerializationOperator(tosa::Op_EXP,
+ Attribute_NONE,
+ nullptr,
+ {inputName},
+ {outputName}));
+ }
+
+ // Only add input tensor if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
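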
+ if(inputName.find("input_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ DType inputDType0 = ArmNNToDType(inputDType);
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+
+ // Re-enable below line for int16 EXP support which requires int32 output in TOSA and remove second line.
+ // DType outputDType0 =
+ // (inputDType == DataType::QSymmS16) ? DType::DType_INT32 : ArmNNToDType(outputs[0]->GetDataType());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+ // operatorInputNames/operatorOutputNames ends up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+} \ No newline at end of file
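
A hedged sketch of what the 8-bit TOSA TABLE for EXP contains; the real table comes from getTosaConst8bitTable(), whose exact rounding and clamping rules are not shown in this diff, so treat the snippet as an approximation of the idea: every representable int8 input is dequantised, run through the reference function, then requantised.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

std::vector<int8_t> BuildExpTable(float inScale, int32_t inZp, float outScale, int32_t outZp)
{
    std::vector<int8_t> table(256);
    for (int32_t i = -128; i <= 127; ++i)
    {
        float x = inScale * static_cast<float>(i - inZp);                        // dequantise
        float y = std::exp(x);                                                   // reference exp
        int32_t q = static_cast<int32_t>(std::round(y / outScale)) + outZp;      // requantise
        table[static_cast<size_t>(i + 128)] = static_cast<int8_t>(std::max(-128, std::min(127, q)));
    }
    return table;
}

int main()
{
    std::vector<int8_t> table = BuildExpTable(0.05f, 0, 0.1f, -128);   // assumed quantisation params
    return table.size() == 256 ? 0 : 1;
}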
diff --git a/src/backends/tosaCommon/operatorMappings/ExpOperator.hpp b/src/backends/tosaCommon/operatorMappings/ExpOperator.hpp
new file mode 100644
index 0000000000..5bdd411149
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ExpOperator.hpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertExpOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ElementwiseUnaryDescriptor* unaryDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.cpp b/src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.cpp
new file mode 100644
index 0000000000..331a6ec54a
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.cpp
@@ -0,0 +1,189 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <numeric>
+#include "FullyConnectedOperator.hpp"
+#include "TosaRescaleOperatorUtils.hpp"
+
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLFullyConnectedOp
+TosaSerializationBasicBlock* ConvertFullyConnectedToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const FullyConnectedDescriptor* fcDescriptor)
+{
+ std::vector<std::string> inputNames;
+ std::vector<std::string> fcInputNames;
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_FULLY_CONNECTED_block_") + GetUniqueTosaMappingID();
+
+ DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+ // Set input names for validation purposes only.
+ if(layer == nullptr)
+ {
+ inputNames.emplace_back("input_0");
+ inputNames.emplace_back("constant_1");
+ if(fcDescriptor->m_BiasEnabled)
+ {
+ inputNames.emplace_back("constant_2");
+ }
+ }
+ // If a layer is present then the block will be used for execution, so input and output names need to be
+ // determined using the previous and following layers so the graph is connected correctly.
+ // For validation this doesn't matter.
+ else
+ {
+ // Get the layer connected to the input slot and determine unique tensor names.
+ for (uint32_t i = 0; i < inputs.size(); ++i)
+ {
+ std::string inputName = GenerateUniqueInputName(layer->GetInputSlot(i));
+ inputNames.push_back(inputName);
+ }
+
+ // Determine unique output tensor name.
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Setup input Tensor
+ // Only add tensor if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+ // There also can't be duplicate tensors.
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ if(inputNames[0].find("input_") != std::string::npos)
+ {
+ tensors.push_back(new TosaSerializationTensor(inputNames[0], inputShape0, inputDType0, {}));
+ }
+
+ // Only add input tensors if weights and bias are not constant or if running validation.
+ // Constant tensors will be created in the ConvertConstantToTosaOperator function.
+ if(!inputs[1]->IsConstant() || layer == nullptr)
+ {
+ std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+ DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(inputNames[1], inputShape1, inputDType1, {}));
+ }
+
+ if(fcDescriptor->m_BiasEnabled)
+ {
+ if(!inputs[2]->IsConstant() || layer == nullptr)
+ {
+ std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
+ DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(inputNames[2], inputShape2, inputDType2, {}));
+ }
+ }
+ else
+ {
+ // If bias is disabled, create a constant bias of 0 as three inputs are required.
+ std::string constantName = std::string("constant_") + GetUniqueTosaMappingID();
+
+ operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {constantName}));
+
+ const DType dType = (inputDType0 == DType_INT8) ? DType_INT32 : outputDType0;
+ std::vector<float> data(outputs[0]->GetShape()[1], 0);
+
+ std::vector<uint8_t> uint8Data;
+ TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+
+ tensors.push_back(new TosaSerializationTensor(constantName,
+ {static_cast<int32_t>(outputs[0]->GetShape()[1])},
+ dType,
+ uint8Data));
+ inputNames.emplace_back(constantName);
+ }
+
+ fcInputNames = inputNames;
+
+ // Set up Reshape operator. TOSA Fully Connected only accepts 2D rank tensors.
+ if (inputs[0]->GetShape().GetNumDimensions() != 2)
+ {
+ uint32_t num_elems = inputs[1]->GetShape()[1];
+ uint32_t num_batch = inputs[0]->GetShape().GetNumElements() / num_elems;
+
+ std::string outputReshapeName = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ const std::vector<int32_t>& targetShape = {static_cast<int32_t>(num_batch), static_cast<int32_t>(num_elems)};
+ TosaReshapeAttribute attribute(GetTosaTensorShape(TensorShape({num_batch, num_elems})));
+
+ auto* reshapeOp = new TosaSerializationOperator(Op_RESHAPE,
+ Attribute_ReshapeAttribute,
+ &attribute,
+ {inputNames[0]},
+ {outputReshapeName});
+ operators.push_back(reshapeOp);
+
+ tensors.push_back(new TosaSerializationTensor(outputReshapeName, targetShape, inputDType0, {}));
+
+ fcInputNames[0] = outputReshapeName;
+ }
+
+
+ // Setup Output Tensor
+ std::vector<int32_t> outputShape0 = {GetTosaTensorShape(outputs[0]->GetShape())};
+ std::string fcOutputName;
+ bool isInputInt8 = (inputDType0 == DType_INT8);
+ if (isInputInt8)
+ {
+ fcOutputName = std::string("intermediate0_") + GetUniqueTosaMappingID();
+ tensors.push_back(new TosaSerializationTensor(fcOutputName, outputShape0, DType_INT32, {}));
+ }
+ else
+ {
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+ }
+
+ // Set up Fully Connected operator
+ TosaFullyConnectedAttribute attribute(inputs[0]->GetQuantizationOffset(), // input_zp
+ inputs[1]->GetQuantizationOffset()); // weight_zp
+
+ std::string& fcOutStr = isInputInt8 ? fcOutputName : outputName;
+ auto* fullyConnected_op = new TosaSerializationOperator(Op_FULLY_CONNECTED,
+ Attribute_FullyConnectedAttribute,
+ &attribute,
+ fcInputNames,
+ {fcOutStr});
+ operators.push_back(fullyConnected_op);
+
+ if (isInputInt8)
+ {
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+ double output_scale = outputs[0]->GetQuantizationScales()[0];
+ double input_scale = inputs[0]->GetQuantizationScales()[0];
+ const std::vector<float>& weight_scales = inputs[1]->GetQuantizationScales();
+
+ TosaSerializationOperator* rescaleOp = nullptr;
+ CreateRescaleTosaOperatorPerChannel(fcOutputName,
+ outputName,
+ 0,
+ output_zp,
+ true,
+ true,
+ input_scale,
+ output_scale,
+ weight_scales,
+ &rescaleOp);
+ operators.push_back(rescaleOp);
+ tensors.push_back(new TosaSerializationTensor(outputName,
+ outputShape0,
+ DType_INT8, {}));
+ }
+
+ // operatorInputNames/operatorOutputNames ends up being the same as
+ // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ inputNames, // inputs
+ {outputName}); // outputs
+} \ No newline at end of file
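
A shape-only sketch of the rank-2 flattening above, with assumed example shapes: TOSA FULLY_CONNECTED only accepts 2D input, so a higher-rank activation is reshaped to [num_batch, num_elems], where num_elems comes from the weight tensor and num_batch is simply the remaining element count.

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t inputElements = 1 * 4 * 4 * 8;   // activation tensor [1,4,4,8] (assumed)
    uint32_t num_elems     = 128;             // features expected per batch, taken from the weights
    uint32_t num_batch     = inputElements / num_elems;
    printf("reshape target = [%u, %u]\n", num_batch, num_elems);   // [1, 128]
    return 0;
}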
diff --git a/src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.hpp b/src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.hpp
new file mode 100644
index 0000000000..12f888d01c
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/FullyConnectedOperator.hpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertFullyConnectedToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const FullyConnectedDescriptor* fcDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/GeluOperator.cpp b/src/backends/tosaCommon/operatorMappings/GeluOperator.cpp
new file mode 100644
index 0000000000..9dd4f2ebc7
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/GeluOperator.cpp
@@ -0,0 +1,108 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "GeluOperator.hpp"
+#include "TosaTableUtils.hpp"
+
+#include <layers/ActivationLayer.hpp>
+
+// This function is paraphrased from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLGeluOp
+TosaSerializationBasicBlock* ConvertGeluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* desc)
+{
+ if (inputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertGeluToTosaOperator: 1 input tensors required.");
+ }
+
+ if (outputs.size() != 1)
+ {
+ throw armnn::Exception("ConvertGeluToTosaOperator: 1 output tensor required.");
+ }
+
+ if (desc->m_Function != ActivationFunction::Gelu)
+ {
+ throw armnn::Exception("ConvertGeluToTosaOperator ActivationDescriptor only supports function Gelu.");
+ }
+
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_GELU_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if (layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ float input_scale = inputs[0]->GetQuantizationScale();
+ float output_scale = outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+ DataType inputDType = inputs[0]->GetDataType();
+
+ bool isInt8 = inputDType == DataType::QAsymmS8 || inputDType == DataType::QSymmS8;
+ if (isInt8)
+ {
+ auto gelu_transform = [](float in) -> float {
+ return 0.5f * in * std::erfc(in * static_cast<float>(-0.70710678118654752440));
+ };
+
+ TosaTableAttribute attribute(
+ getTosaConst8bitTable(input_scale, input_zp, output_scale, output_zp, gelu_transform));
+ operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
+ Attribute_TableAttribute,
+ &attribute,
+ {inputName},
+ {outputName}));
+ }
+ else if (inputDType == DataType::QSymmS16 ||
+ inputDType == DataType::Signed32 ||
+ inputDType == DataType::Signed64)
+ {
+ throw Exception("ConvertGeluOperator() only supports int8 quantized types.");
+ }
+ else
+ {
+ throw Exception("ConvertGeluOperator() floating point types currently unimplemented.");
+ }
+
+ // Only add input tensors if connected layer is an input layer.
+ // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+ std::vector<int32_t> inputShape0;
+ DType inputDType0 = DType::DType_UNKNOWN;
+ if(inputName.find("input_") != std::string::npos)
+ {
+ inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // operatorInputNames/operatorOutputNames end up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+}
diff --git a/src/backends/tosaCommon/operatorMappings/GeluOperator.hpp b/src/backends/tosaCommon/operatorMappings/GeluOperator.hpp
new file mode 100644
index 0000000000..30db68f114
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/GeluOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertGeluToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ActivationDescriptor* activationDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/LogOperator.cpp b/src/backends/tosaCommon/operatorMappings/LogOperator.cpp
new file mode 100644
index 0000000000..846950a000
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/LogOperator.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "LogOperator.hpp"
+#include "TosaTableUtils.hpp"
+
+TosaSerializationBasicBlock* ConvertLogOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ElementwiseUnaryDescriptor* unaryDescriptor)
+{
+ if (unaryDescriptor->m_Operation != UnaryOperation::Log)
+ {
+ throw armnn::Exception("ConvertLogOperator: Unsupported elementwise unary operation in descriptor.");
+ }
+
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_LOG_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if(layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+ std::vector<TosaSerializationOperator*> operators;
+
+ float input_scale = inputs[0]->GetQuantizationScale();
+ float output_scale = outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
+ DataType inputDType = inputs[0]->GetDataType();
+
+ if (inputDType == DataType::QAsymmS8 ||
+ inputDType == DataType::QSymmS8)
+ {
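+        // log(x) is undefined for x <= 0, so such inputs are mapped to the most negative value
+        // representable in the output quantization space (dequantized -128).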
+ const float output_min = static_cast<float>(-128 - output_zp) * output_scale;
+
+ auto log_func = [&](float x) -> float
+ {
+ if (x <= 0.0f)
+ {
+ return output_min;
+ }
+ return std::log(x);
+ };
+
+ TosaTableAttribute attribute(
+ getTosaConst8bitTable(input_scale, input_zp, output_scale, output_zp, log_func));
+ operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
+ Attribute_TableAttribute,
+ &attribute,
+ {inputName},
+ {outputName}));
+ }
+ else if (inputDType == DataType::QSymmS16)
+ {
+        throw Exception("ConvertLogOperator() int16 support is not implemented yet.");
+        // The following generates the table, TOSA attribute and operator for int16 log.
+        // However, running the int16 LOG EndToEnd test produces incorrect output values.
+        // At the time of writing the LOG operator there is no requirement for int16 support.
+ // Points to enable int16 in the future:
+ // - TOSA specifies LOG int16 input must have int32 output
+ // - We potentially need a rescale after the int32 LOG output to convert back to int16.
+ /*
+ const float output_min = (-32768 - output_zp) * static_cast<float>(output_scale);
+
+ auto log_func = [&](float x) -> float {
+ if (x <= 0.0f) {
+ return output_min;
+ }
+ return std::log(x);
+ };
+
+ TosaTableAttribute attribute(
+ getTosaConst16bitTable<float>(input_scale, input_zp, output_scale, output_zp, log_func));
+ operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
+ Attribute_TableAttribute,
+ &attribute,
+ {inputName},
+ {outputName}));
+ */
+ }
+ else if (inputDType == DataType::Signed32 ||
+ inputDType == DataType::Signed64)
+ {
+        throw Exception(
+            "ConvertLogOperator() does not support int32 or int64. Only int8 quantized and floating point types are supported.");
+ }
+ // Floating point LOG operator
+ else
+ {
+ operators.push_back(new TosaSerializationOperator(tosa::Op_LOG,
+ Attribute_NONE,
+ nullptr,
+ {inputName},
+ {outputName}));
+ }
+
+    // Only add the input tensor if the connected layer is an input layer,
+    // as intermediate and constant tensors are created separately.
+    // There also can't be duplicate tensors.
+ if(inputName.find("input_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ DType inputDType0 = ArmNNToDType(inputDType);
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+
+ // Re-enable below line for int16 LOG support which requires int32 output in TOSA and remove second line.
+ // DType outputDType0 =
+ // (inputDType == DataType::QSymmS16) ? DType::DType_INT32 : ArmNNToDType(outputs[0]->GetDataType());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // operatorInputNames/operatorOutputNames end up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+} \ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/LogOperator.hpp b/src/backends/tosaCommon/operatorMappings/LogOperator.hpp
new file mode 100644
index 0000000000..5946d8d621
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/LogOperator.hpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertLogOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ElementwiseUnaryDescriptor* unaryDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/PadOperator.cpp b/src/backends/tosaCommon/operatorMappings/PadOperator.cpp
new file mode 100644
index 0000000000..c82dcb05a5
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/PadOperator.cpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PadOperator.hpp"
+#include <armnnUtils/QuantizeHelper.hpp>
+
+TosaSerializationBasicBlock* ConvertPadToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const PadDescriptor* padDescriptor)
+{
+ std::string inputName = std::string("input_");
+ std::string outputName = std::string("output0_");
+ std::string blockName = std::string("Op_PAD_block_") + GetUniqueTosaMappingID();
+
+ // If a layer is present then the block will be used for execution, so input and output names need to be determined
+ // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+ if(layer != nullptr)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<int32_t> padding;
+    padding.reserve(2 * padDescriptor->m_PadList.size());
+    for (size_t it = 0; it < padDescriptor->m_PadList.size(); ++it)
+    {
+ padding.push_back(static_cast<int32_t>(padDescriptor->m_PadList[it].first));
+ padding.push_back(static_cast<int32_t>(padDescriptor->m_PadList[it].second));
+ }
+
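+    // TOSA's PadAttribute carries both an integer and a floating-point pad constant; the
+    // integer value is derived from the descriptor's pad value using the input tensor's
+    // quantization parameters.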
+ auto intPadValue = armnnUtils::SelectiveQuantize<int32_t>(padDescriptor->m_PadValue,
+ inputs[0]->GetQuantizationScale(),
+ inputs[0]->GetQuantizationOffset());
+    TosaPadAttribute padAttribute(padding, intPadValue, padDescriptor->m_PadValue);
+
+ auto* op = new TosaSerializationOperator(Op_PAD,
+ Attribute_PadAttribute,
+ &padAttribute,
+ {inputName},
+ {outputName});
+
+ std::vector<TosaSerializationTensor*> tensors;
+
+    // Only add the input tensor if the connected layer is an input layer,
+    // as intermediate and constant tensors are created separately.
+    // There also can't be duplicate tensors.
+ if(inputName.find("input_") != std::string::npos)
+ {
+ std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+ DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
+ }
+
+ std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+ DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+ tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // operatorInputNames/operatorOutputNames end up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ {op}, // operators
+ tensors, // tensors
+ {inputName}, // inputs
+ {outputName}); // outputs
+} \ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/PadOperator.hpp b/src/backends/tosaCommon/operatorMappings/PadOperator.hpp
new file mode 100644
index 0000000000..7844669b9c
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/PadOperator.hpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertPadToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const PadDescriptor* padDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
index a4d7d0ed28..6c9b565416 100644
--- a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
@@ -90,7 +90,7 @@ TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
tensors.push_back(zeroPointTensor);
// const_scale
- TosaSerializationOperator *scaleOp = nullptr;
+ TosaSerializationOperator* scaleOp = nullptr;
TosaSerializationTensor* scaleTensor = nullptr;
CreateConstTosaOperator<float>(outputNameScale,
scale,
@@ -138,9 +138,9 @@ TosaSerializationBasicBlock* ConvertQuantizeToTosaOperator(const Layer* layer,
}
else
{
- double scale_alpha = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
- int32_t input_zp = inputs[0]->GetQuantizationOffset();
- int32_t output_zp = outputs[0]->GetQuantizationOffset();
+ double scale_alpha = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
+ int32_t input_zp = inputs[0]->GetQuantizationOffset();
+ int32_t output_zp = outputs[0]->GetQuantizationOffset();
TosaSerializationOperator* rescaleOp = nullptr;
CreateRescaleTosaOperator(inputName,
diff --git a/src/backends/tosaCommon/operatorMappings/ReduceOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReduceOperator.cpp
new file mode 100644
index 0000000000..7ce51297b0
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReduceOperator.cpp
@@ -0,0 +1,178 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+#include "ReduceOperator.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include "TosaRescaleOperatorUtils.hpp"
+
+TosaSerializationBasicBlock* ConvertReduceToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ReduceDescriptor* reduceDescriptor)
+{
+ // Early exits
+ if (!inputs[0])
+ {
+ throw armnn::Exception("Must provide a valid input tensor.");
+ }
+
+ if (inputs[0]->IsQuantized() ^ outputs[0]->IsQuantized())
+ {
+        throw armnn::Exception("Input and output tensors must both be quantized or both be non-quantized.");
+ }
+
+ if (reduceDescriptor->m_vAxis.size() > 1)
+ {
+ throw armnn::Exception("ConvertReduceOperator: Reduce Operation with multiple axes not implemented.");
+ }
+
+ if (reduceDescriptor->m_vAxis.empty())
+ {
+ throw armnn::Exception("ConvertReduceOperator: Reduce Operation with empty axis not implemented.");
+ }
+
+ auto axis = static_cast<int32_t>(reduceDescriptor->m_vAxis[0]);
+ auto rank = static_cast<int32_t>(inputs[0]->GetNumDimensions());
+
+ if (axis < 0 || axis >= rank)
+ {
+ throw armnn::Exception("Axis value not within range of input shape.");
+ }
+
+ // Tensor names
+ std::string inputName = "input_";
+
+ std::string outputNameRescale1 = "intermediate0_" + GetUniqueTosaMappingID();
+ std::string outputNameReduce = "intermediate1_" + GetUniqueTosaMappingID();
+ std::string outputNameRescale2 = "intermediate2_" + GetUniqueTosaMappingID();
+
+ std::string outputName = "output0_";
+
+ std::string reduceOpName = GetReduceOperationAsCString(reduceDescriptor->m_ReduceOperation);
+ std::string blockName = "Op_REDUCE_" + reduceOpName + "_block_" + GetUniqueTosaMappingID();
+
+ std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
+ std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
+
+ if (layer)
+ {
+ inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
+ outputName = GenerateUniqueOutputName(*layer);
+ }
+
+ std::vector<TosaSerializationTensor*> tensors;
+
+ DType inputType = ArmNNToDType(inputs[0]->GetDataType());
+
+ if (inputName.substr(0, 6) == "input_")
+ {
+ tensors.emplace_back(new TosaSerializationTensor(inputName,
+ inputShape,
+ inputType,
+ {}));
+ }
+
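+    // For quantized tensors the sum is accumulated in int32: the input is rescaled up by
+    // 2^input_shift combined with its quantization scale before REDUCE_SUM, then rescaled
+    // back down into the output quantization space afterwards.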
+ int32_t input_shift = 20;
+
+ double input_scale = static_cast<double>(1 << input_shift) * inputs[0]->GetQuantizationScale();
+ double output_scale = 1.0 / (outputs[0]->GetQuantizationScale() * static_cast<double>(1 << input_shift));
+
+ int32_t input_zp = 0;
+ int32_t output_zp = 0;
+
+ std::vector<TosaSerializationOperator*> operators;
+
+ // Conditional RESCALE
+ if (inputs[0]->IsQuantized())
+ {
+ TosaSerializationOperator* rescaleOp1 = nullptr;
+
+ CreateRescaleTosaOperator(inputName,
+ outputNameRescale1,
+ input_scale,
+ input_zp,
+ output_zp,
+ true,
+ true,
+ &rescaleOp1);
+
+ operators.emplace_back(rescaleOp1);
+
+ tensors.emplace_back(new TosaSerializationTensor(outputNameRescale1,
+ inputShape,
+ DType_INT32,
+ {}));
+ }
+
+ // REDUCE
+ TosaAxisAttribute reduceAttribute(axis);
+
+ switch(reduceDescriptor->m_ReduceOperation)
+ {
+ case ReduceOperation::Sum:
+ operators.emplace_back(new TosaSerializationOperator(Op_REDUCE_SUM,
+ Attribute_AxisAttribute,
+ &reduceAttribute,
+ { tensors.back()->GetName() },
+ { outputNameReduce }));
+ break;
+ default:
+ throw armnn::Exception("ConvertReduceOperator: Reduce Operation not implemented.");
+ }
+
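+    // The REDUCE output keeps the input rank with the reduced axis collapsed to 1; the final
+    // RESHAPE below produces the requested output shape (with or without keepDims).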
+ std::vector<int32_t>& outputShapeReduce = inputShape;
+ outputShapeReduce[reduceDescriptor->m_vAxis[0]] = 1;
+
+ tensors.emplace_back(new TosaSerializationTensor(outputNameReduce,
+ outputShapeReduce,
+ tensors.back()->GetDtype(),
+ {}));
+
+ // Conditional RESCALE
+ if (inputs[0]->IsQuantized())
+ {
+ TosaSerializationOperator* rescaleOp2 = nullptr;
+
+ CreateRescaleTosaOperator(outputNameReduce,
+ outputNameRescale2,
+ output_scale,
+ output_zp,
+ input_zp,
+ true,
+ true,
+ &rescaleOp2);
+
+ operators.push_back(rescaleOp2);
+
+ tensors.emplace_back(new TosaSerializationTensor(outputNameRescale2,
+ outputShapeReduce,
+ inputType,
+ {}));
+ }
+
+ // RESHAPE
+ TosaReshapeAttribute reshapeAttribute(GetTosaTensorShape(outputs[0]->GetShape()));
+
+ operators.emplace_back(new TosaSerializationOperator(Op_RESHAPE,
+ Attribute_ReshapeAttribute,
+ &reshapeAttribute,
+ { tensors.back()->GetName() },
+ { outputName }));
+
+ tensors.emplace_back(new TosaSerializationTensor(outputName,
+ outputShape,
+ inputType,
+ {}));
+
+ return new TosaSerializationBasicBlock(blockName, // name
+ mainName, // region name
+ operators, // operators
+ tensors, // tensors
+ { inputName }, // inputs
+ { outputName }); // outputs
+}
diff --git a/src/backends/tosaCommon/operatorMappings/ReduceOperator.hpp b/src/backends/tosaCommon/operatorMappings/ReduceOperator.hpp
new file mode 100644
index 0000000000..cbbe297d40
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReduceOperator.hpp
@@ -0,0 +1,13 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+TosaSerializationBasicBlock* ConvertReduceToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ReduceDescriptor* reduceDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
index bd1a59670e..541b39cd8d 100644
--- a/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ReluOperator.cpp
@@ -17,7 +17,7 @@
TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
const std::vector<const TensorInfo*>& inputs,
const std::vector<const TensorInfo*>& outputs,
- const ActivationDescriptor*)
+ const ActivationDescriptor* desc)
{
if (inputs.size() != 1)
{
@@ -31,7 +31,36 @@ TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
std::string inputName = std::string("input_");
std::string outputName = std::string("output0_");
- std::string blockName = std::string("Op_RELU_block_") + GetUniqueTosaMappingID();
+ std::string blockName = "";
+
+ int32_t clamp_min = 0;
+ int32_t clamp_max = 0;
+ float float_max = 0.0f;
+ switch (desc->m_Function)
+ {
+ case ActivationFunction::ReLu:
+ {
+ clamp_max = std::numeric_limits<int32_t>::max();
+ float_max = std::numeric_limits<float>::max();
+ blockName = std::string("Op_RELU_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ case ActivationFunction::BoundedReLu:
+ {
+ clamp_max = static_cast<int32_t>(desc->m_A);
+ float_max = desc->m_A;
+ blockName = std::string("Op_BOUNDED_RELU_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ case ActivationFunction::LeakyReLu:
+ {
+ throw Exception("LeakyRelu TOSA mappings are performed in ConvertLeakyReluToTosaOperator().");
+ }
+ default:
+ {
+ throw Exception("Activation function is not supported in ConvertReluToTosaOperator().");
+ }
+ }
// If a layer is present then the block will be used for execution, so input and output names need to be determined
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
@@ -60,8 +89,6 @@ TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
- int32_t clamp_min = 0;
- int32_t clamp_max = std::numeric_limits<int32_t>::max();
std::string clampInputNameStr = inputName;
if (inputDType0 == tosa::DType::DType_INT8 || inputDType0 == tosa::DType::DType_INT16)
{
@@ -72,18 +99,26 @@ TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
int32_t input_zp = inputs[0]->GetQuantizationOffset();
int32_t output_zp = outputs[0]->GetQuantizationOffset();
- clamp_min = outputs[0]->GetQuantizationOffset();
+ clamp_min = output_zp;
+
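+            // BoundedReLu's upper bound (m_A) is a real value, so requantize it into the
+            // output quantization space before clamping.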
+ if (desc->m_Function == ActivationFunction::BoundedReLu)
+ {
+ clamp_max = static_cast<int32_t>(std::round(desc->m_A / outputs[0]->GetQuantizationScale())) + output_zp;
+ }
+
if (inputDType0 == tosa::DType::DType_INT8)
{
clamp_min =
clamp_min < std::numeric_limits<int8_t>::min() ? std::numeric_limits<int8_t>::min() : clamp_min;
- clamp_max = std::numeric_limits<int8_t>::max();
+ clamp_max =
+ clamp_max > std::numeric_limits<int8_t>::max() ? std::numeric_limits<int8_t>::max() : clamp_max;
}
else
{
clamp_min =
clamp_min < std::numeric_limits<int16_t>::min() ? std::numeric_limits<int16_t>::min() : clamp_min;
- clamp_max = std::numeric_limits<int16_t>::max();
+ clamp_max =
+ clamp_max > std::numeric_limits<int16_t>::max() ? std::numeric_limits<int16_t>::max() : clamp_max;
}
TosaSerializationOperator* rescaleOp = nullptr;
@@ -101,8 +136,8 @@ TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
inputDType0,
{}));
}
-
- TosaClampAttribute attribute(clamp_min, clamp_max, 0, std::numeric_limits<float>::max());
+
+ TosaClampAttribute attribute(clamp_min, clamp_max, 0, float_max);
auto* clamp_op = new TosaSerializationOperator(Op_CLAMP,
Attribute_ClampAttribute,
&attribute,
diff --git a/src/backends/tosaCommon/operatorMappings/ResizeOperator.hpp b/src/backends/tosaCommon/operatorMappings/ResizeOperator.hpp
index 881e7c79ad..502fa7af09 100644
--- a/src/backends/tosaCommon/operatorMappings/ResizeOperator.hpp
+++ b/src/backends/tosaCommon/operatorMappings/ResizeOperator.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,7 +10,7 @@
using namespace armnn;
using namespace tosa;
-TosaSerializationBasicBlock* ConvertResizeToTosaOperator(const Layer* inputSize,
- const std::vector<const TensorInfo*>& outputSize,
- const std::vector<const TensorInfo*>& scale_n,
- const ResizeDescriptor* scale_d);
+TosaSerializationBasicBlock* ConvertResizeToTosaOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ResizeDescriptor* resizeDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/RsqrtOperator.cpp
index d0eac0b4f4..cc1c70a663 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/RsqrtOperator.cpp
@@ -3,17 +3,21 @@
// SPDX-License-Identifier: MIT
//
-#include "ElementwiseUnaryOperator.hpp"
+#include "RsqrtOperator.hpp"
-TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
- const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const ElementwiseUnaryDescriptor* unaryDescriptor)
+TosaSerializationBasicBlock* ConvertRsqrtOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ElementwiseUnaryDescriptor* unaryDescriptor)
{
+ if (unaryDescriptor->m_Operation != UnaryOperation::Rsqrt)
+ {
+ throw armnn::Exception("ConvertRsqrtOperator: Unsupported elementwise unary operation in descriptor.");
+ }
+
std::string input0Name = std::string("input_");
std::string outputName = std::string("output0_");
- std::string blockName = std::string("Op_ELEMENTWISEUNARY_block_") + GetUniqueTosaMappingID();
-
+ std::string blockName = std::string("Op_RSQRT_block_") + GetUniqueTosaMappingID();
// If a layer is present then the block will be used for execution, so input and output names need to be determined
// using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
@@ -23,22 +27,11 @@ TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
outputName = GenerateUniqueOutputName(*layer);
}
- TosaSerializationOperator* op = nullptr;
- switch(unaryDescriptor->m_Operation)
- {
- case UnaryOperation::Rsqrt:
- {
- op = new TosaSerializationOperator(tosa::Op_RSQRT,
- Attribute_NONE,
- nullptr,
- {input0Name},
- {outputName});
- blockName = std::string("Op_RSQRT_block_") + GetUniqueTosaMappingID();
- break;
- }
- default:
- throw armnn::Exception("ConvertElementwiseUnaryToTosaOperator: Unsupported layer type.");
- }
+ auto* op = new TosaSerializationOperator(tosa::Op_RSQRT,
+ Attribute_NONE,
+ nullptr,
+ {input0Name},
+ {outputName});
std::vector<TosaSerializationTensor*> tensors;
// Only add input tensor if connected layer is an input layer.
diff --git a/src/backends/tosaCommon/operatorMappings/RsqrtOperator.hpp b/src/backends/tosaCommon/operatorMappings/RsqrtOperator.hpp
new file mode 100644
index 0000000000..7804e91598
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/RsqrtOperator.hpp
@@ -0,0 +1,16 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertRsqrtOperator(const Layer* layer,
+ const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const ElementwiseUnaryDescriptor* unaryDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index 9d7ff1e4c9..5e1d3ff4ff 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -6,19 +6,26 @@
#pragma once
#include "ReluOperator.hpp"
+#include "GeluOperator.hpp"
#include "LeakyReluOperator.hpp"
#include "AvgPool2DIgnoreValueOperator.hpp"
+#include "BatchMatMulOperator.hpp"
#include "ConcatOperator.hpp"
#include "ConstantOperator.hpp"
#include "Conv2dOperator.hpp"
#include "DepthwiseConv2dOperator.hpp"
#include "ElementwiseBinaryOperator.hpp"
-#include "ElementwiseUnaryOperator.hpp"
+#include "FullyConnectedOperator.hpp"
+#include "PadOperator.hpp"
+#include "RsqrtOperator.hpp"
#include "Pooling2DOperator.hpp"
#include "QuantizeOperator.hpp"
+#include "ReduceOperator.hpp"
#include "ReshapeOperator.hpp"
#include "ResizeOperator.hpp"
#include "SliceOperator.hpp"
#include "SplitOperator.hpp"
#include "TransposeConv2dOperator.hpp"
#include "TransposeOperator.hpp"
+#include "ExpOperator.hpp"
+#include "LogOperator.hpp" \ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/TosaTableUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaTableUtils.hpp
new file mode 100644
index 0000000000..d12f0d0986
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/TosaTableUtils.hpp
@@ -0,0 +1,96 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+//
+// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cfloat>
+#include <vector>
+#include <functional>
+#include <cstdint>
+#include <cmath>
+
+
+// Abstract of getTosaConst8bitTable() function from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc
+inline std::vector<int16_t> getTosaConst8bitTable(float input_scale,
+ int32_t input_zp,
+ float output_scale,
+ int32_t output_zp,
+ std::function<float(float)> func)
+{
+    // TosaTableAttribute requires an int16 vector, even though this TOSA TABLE legalization is performed using int8 values.
+ std::vector<int16_t> table;
+ table.reserve(256);
+ float inverse_scale = 1.0f / output_scale;
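+    // For each of the 256 possible int8 input values: dequantize, apply the transform,
+    // then requantize into the output quantization space, saturating to [-128, 127].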
+ for (int32_t i = -128; i < 128; i++)
+ {
+ float dequantized = input_scale * static_cast<float>(i - input_zp);
+ float transformed = func(dequantized);
+
+ float max = (output_scale > 1.0) ? FLT_MAX : (FLT_MAX * output_scale);
+ if (transformed >= max)
+ {
+ table.push_back(INT8_MAX);
+ continue;
+ }
+
+ int32_t rescaled = static_cast<int32_t>(std::round(transformed * inverse_scale));
+ int32_t quantized = static_cast<int32_t>(rescaled + output_zp);
+ table.push_back(
+ static_cast<int8_t>(std::min(std::max(quantized, -128), 127)));
+ }
+ return table;
+}
+
+// Abstract of getTosaConst16bitTable() function from:
+// tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc
+template <typename FloatT>
+inline std::vector<int16_t> getTosaConst16bitTable(float input_scale,
+ int32_t input_zp,
+ float output_scale,
+ int32_t output_zp,
+ std::function<FloatT(FloatT)> func)
+{
+ std::vector<int16_t> table;
+ table.reserve(513);
+
+ FloatT input_min =
+ input_scale * static_cast<FloatT>(std::numeric_limits<int16_t>::min() - input_zp);
+ FloatT input_max =
+ input_scale * static_cast<FloatT>(std::numeric_limits<int16_t>::max() - input_zp);
+ FloatT output_min =
+ output_scale * static_cast<FloatT>(std::numeric_limits<int16_t>::min() - output_zp);
+ FloatT output_max =
+ output_scale * static_cast<FloatT>(std::numeric_limits<int16_t>::max() - output_zp);
+
+ FloatT step = (input_max - input_min) / 512;
+ FloatT half_step = step / 2;
+ FloatT output_scaling_inv = 65536 / (output_max - output_min);
+
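+    // Sample the transform at 513 points across the int16 input range. Each entry is
+    // bias-corrected by half the error between the linearly interpolated midpoint and the
+    // true midpoint value, following the referenced TensorFlow legalize_utils implementation.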
+ for (int32_t i = 0; i < 512; i++)
+ {
+ FloatT iFloat = static_cast<FloatT>(i);
+ FloatT sample_val =
+ std::round(func(input_min + (iFloat * step)) * output_scaling_inv);
+ FloatT midpoint_interp_val = std::round(
+ ((func(input_min + (iFloat + 1) * step) * output_scaling_inv) +
+ std::round(func(input_min + (iFloat * step)) * output_scaling_inv)) /
+ 2);
+ FloatT midpoint_val = std::round(func(input_min + (iFloat * step) + half_step) *
+ output_scaling_inv);
+ FloatT midpoint_err = midpoint_interp_val - midpoint_val;
+ FloatT bias = std::round(midpoint_err / 2);
+
+ table.push_back(static_cast<int16_t>(
+ std::min<FloatT>(std::max<FloatT>(sample_val - bias, -32768), 32767)));
+ }
+
+ FloatT max_val = std::round(func(input_max) * output_scaling_inv);
+ table.push_back(static_cast<int16_t>(
+ std::min<FloatT>(std::max<FloatT>(max_val, -32768), 32767)));
+ return table;
+} \ No newline at end of file
diff --git a/src/backends/tosaCommon/test/FullyConnectedChecker.hpp b/src/backends/tosaCommon/test/FullyConnectedChecker.hpp
new file mode 100644
index 0000000000..fac8d8f7c3
--- /dev/null
+++ b/src/backends/tosaCommon/test/FullyConnectedChecker.hpp
@@ -0,0 +1,101 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TosaTestUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+void VerifyFullyConnected(TosaSerializationBasicBlock* fcBlock,
+ const std::vector<std::vector<int32_t>>& inputShape,
+ const std::vector<std::vector<int32_t>>& outputShape,
+ const BaseDescriptor& descriptor,
+ DType dataType = DType_FP32)
+{
+ uint32_t numInputs = static_cast<uint32_t>(inputShape.size());
+ uint32_t numOutputs = static_cast<uint32_t>(outputShape.size());
+ uint32_t numInputTensors = PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)->m_BiasEnabled ? 3 : 2;
+
+ std::string blockStr = "Op_FULLY_CONNECTED_block_";
+ CHECK(fcBlock->GetName().find(blockStr) != std::string::npos);
+ CHECK(fcBlock->GetInputs().size() == numInputTensors);
+ CHECK(fcBlock->GetOutputs().size() == numOutputs);
+ CHECK(fcBlock->GetOperators().size() == 2);
+ CHECK(fcBlock->GetTensors().size() == (numInputs + numOutputs + 1));
+
+ //
+ // Verify Reshape operator
+ //
+
+ TosaSerializationOperator* reshapeOp = fcBlock->GetOperators().at(0);
+ CHECK(reshapeOp->GetAttributeType() == tosa::Attribute_ReshapeAttribute);
+ CHECK(reshapeOp->GetOp() == tosa::Op_RESHAPE);
+
+ //
+ // Verify FullyConnected operator
+ //
+
+ TosaSerializationOperator* fullyConnectedOp = fcBlock->GetOperators().at(1);
+ CHECK(fullyConnectedOp->GetAttributeType() == tosa::Attribute_FullyConnectedAttribute);
+ CHECK(fullyConnectedOp->GetOp() == tosa::Op_FULLY_CONNECTED);
+
+ // Inputs
+ CHECK(fullyConnectedOp->GetInputTensorNames().size() == numInputTensors);
+
+ // - Input
+ std::basic_string<char> blockInputName = fcBlock->GetInputs()[0];
+ std::basic_string<char> operatorInputName = reshapeOp->GetInputTensorNames()[0];
+ std::basic_string<char> inputTensorName = fcBlock->GetTensors()[0]->GetName();
+
+ CHECK(blockInputName == operatorInputName);
+ CHECK(inputTensorName == operatorInputName);
+ CHECK(blockInputName.find("input_") != std::string::npos);
+
+ TosaSerializationTensor* inputTensor = fcBlock->GetTensorByName(operatorInputName);
+ CHECK(inputTensor->GetDtype() == dataType);
+ CHECK(inputTensor->GetData().size() == 0);
+ CHECK(inputTensor->GetShape() == inputShape[0]);
+
+ // - Weights
+ std::basic_string<char> blockWeightsName = fcBlock->GetInputs()[1];
+ std::basic_string<char> operatorWeightsName = fullyConnectedOp->GetInputTensorNames()[1];
+
+ CHECK(blockWeightsName == operatorWeightsName);
+ CHECK(blockWeightsName.find("constant_") != std::string::npos);
+
+ // - Bias
+ if (PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)->m_BiasEnabled)
+ {
+ std::basic_string<char> blockBiasName = fcBlock->GetInputs()[2];
+ std::basic_string<char> operatorBiasName = fullyConnectedOp->GetInputTensorNames()[2];
+
+ CHECK(blockBiasName == operatorBiasName);
+ CHECK(blockBiasName.find("constant_") != std::string::npos);
+ }
+
+
+ // Outputs
+ CHECK(fullyConnectedOp->GetOutputTensorNames().size() == numOutputs);
+
+ std::basic_string<char> blockOutputName = fcBlock->GetOutputs()[0];
+ std::basic_string<char> operatorOutputName = fullyConnectedOp->GetOutputTensorNames()[0];
+ std::basic_string<char> outputTensorName = fcBlock->GetTensors()[numInputs+1]->GetName();
+
+ CHECK(blockOutputName == operatorOutputName);
+ CHECK(outputTensorName == operatorOutputName);
+ CHECK(blockOutputName.find("output0_") != std::string::npos);
+
+ TosaSerializationTensor* outputTensor = fcBlock->GetTensorByName(operatorOutputName);
+ CHECK(outputTensor->GetDtype() == dataType);
+ CHECK(outputTensor->GetData().size() == 0);
+ CHECK(outputTensor->GetShape() == outputShape[0]);
+
+ CHECK(blockOutputName == operatorOutputName);
+
+ // Verify Attribute
+ TosaFullyConnectedAttribute attribute = fullyConnectedOp->GetAttribute();
+    CHECK(0 == attribute.weight_zp());
+    CHECK(0 == attribute.input_zp());
+} \ No newline at end of file
diff --git a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
index 6ad6ea8d05..ce56648a53 100644
--- a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
@@ -4,9 +4,11 @@
//
#include "AvgPool2DIgnoreValueChecker.hpp"
+#include "FullyConnectedChecker.hpp"
#include "QuantizeChecker.hpp"
#include "SplitChecker.hpp"
-#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>
+#include "CommonTestUtils.hpp"
+
#include <armnn/IRuntime.hpp>
using namespace armnn;
@@ -85,6 +87,95 @@ TEST_CASE("GetTosaMappingFromLayer_AvgPool2DIgnoreValueLayer")
descriptor);
}
+TEST_CASE("GetTosaMapping_FullyConnectedLayer")
+{
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_BiasEnabled = true;
+
+ constexpr static unsigned int inputWidth = 3u;
+ constexpr static unsigned int inputHeight = 2u;
+ constexpr static unsigned int inputChannels = 1u;
+ constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+ constexpr static unsigned int outputChannels = 2u;
+
+ const armnn::TensorInfo inputInfo({ 1, inputChannels, inputHeight, inputWidth }, DataType::Float32);
+ const armnn::TensorInfo outputInfo({ 1, outputChannels }, DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ outputChannels, inputSize }, DataType::Float32, 1.0, 0, true);
+ const armnn::TensorInfo biasesInfo({ outputChannels }, DataType::Float32, 1.0, 0, true);
+
+ std::vector<std::vector<int32_t>> inputShapes = {{ 1, inputChannels, inputHeight, inputWidth },
+ { outputChannels, inputSize },
+ { outputChannels }};
+ std::vector<std::vector<int32_t>> outputShape = {{ 1, outputChannels }};
+
+
+ TosaSerializationBasicBlock* basicBlock = GetTosaMapping(nullptr,
+ LayerType::FullyConnected,
+ {&inputInfo, &weightsInfo, &biasesInfo},
+ {&outputInfo},
+ descriptor);
+
+ VerifyFullyConnected(basicBlock,
+ inputShapes,
+ outputShape,
+ descriptor);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_FullyConnectedLayer")
+{
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ INetworkPtr net(INetwork::Create());
+
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_BiasEnabled = true;
+
+ constexpr static unsigned int inputWidth = 3u;
+ constexpr static unsigned int inputHeight = 2u;
+ constexpr static unsigned int inputChannels = 1u;
+ constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+ constexpr static unsigned int outputChannels = 2u;
+
+ const armnn::TensorInfo inputInfo({ 1, inputChannels, inputHeight, inputWidth }, DataType::Float32);
+ const armnn::TensorInfo outputInfo({ 1, outputChannels }, DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ outputChannels, inputSize }, DataType::Float32, 1.0, 0, true);
+ const armnn::TensorInfo biasesInfo({ outputChannels }, DataType::Float32, 1.0, 0, true);
+
+ std::vector<std::vector<int32_t>> inputShapes = {{ 1, inputChannels, inputHeight, inputWidth }};
+ std::vector<std::vector<int32_t>> outputShape = {{ 1, outputChannels }};
+
+ std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
+ ConstTensor weights(weightsInfo, weightsData);
+
+ std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
+ ConstTensor biases(biasesInfo, biasesData);
+
+ IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input");
+ IConnectableLayer* const weightsLayer = net->AddConstantLayer(weights, "weights");
+ IConnectableLayer* const biasesLayer = net->AddConstantLayer(biases, "biases");
+ IConnectableLayer* const fullyConnectedLayer = net->AddFullyConnectedLayer(descriptor, "fully_connected");
+ IConnectableLayer* const outputLayer = net->AddOutputLayer(0);
+
+ inputLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
+ weightsLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(1));
+ biasesLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(2));
+ fullyConnectedLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ biasesLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+ fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(fullyConnectedLayer));
+
+ VerifyFullyConnected(basicBlock,
+ inputShapes,
+ outputShape,
+ descriptor);
+}
+
TEST_CASE("GetTosaMapping_QuantizeLayer")
{
NullDescriptor descriptor;
@@ -98,6 +189,7 @@ TEST_CASE("GetTosaMapping_QuantizeLayer")
GetTosaMapping(nullptr, LayerType::Quantize, {&inputTensorInfo}, {&outputTensorInfo}, descriptor);
VerifyQuantize(basicBlock, shape, ArmNNToDType(DataType::Float32), ArmNNToDType(outputDataType));
}
+
TEST_CASE("GetTosaMappingFromLayer_QuantizeLayer")
{
IRuntime::CreationOptions options;
@@ -193,45 +285,4 @@ TEST_CASE("GetTosaMappingFromLayer_SplitLayer")
descriptor);
}
-// Activation
-
-static std::vector<BackendId> tosaDefaultBackends = { "TosaRef" };
-
-TEST_CASE("GetTosaMapping_ActivationFloat32")
-{
- ActivationEndToEndTest<DataType::Float32>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
-}
-
-TEST_CASE("GetTosaMapping_ActivationFloat16")
-{
- ActivationEndToEndTest<DataType::Float16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
-}
-
-TEST_CASE("GetTosaMapping_ActivationInt32")
-{
- ActivationEndToEndTest<DataType::Signed32>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.15f, 0, 0.01f);
-}
-
-TEST_CASE("GetTosaMapping_ActivationInt16")
-{
- ActivationEndToEndTest<DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.35f, 0, 0.01f);
-}
-
-TEST_CASE("GetTosaMapping_ActivationInt8")
-{
- ActivationEndToEndTest<DataType::QSymmS8>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.75f, 0, 0.01f);
-}
-
-TEST_CASE("UNSUPPORTED_GetTosaMapping_ActivationUInt8")
-{
- try
- {
- ActivationEndToEndTest<DataType::QAsymmU8>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
- FAIL("An exception should have been thrown");
- }
- catch (armnn::Exception& e)
- {
- CHECK_EQ(std::string(e.what()), "Failed to assign a backend to each layer");
- }
-}
}
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index 8665aa9102..57946f662a 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -254,6 +254,7 @@ TEST_CASE("GetTosaMappingFromLayer_Conv2dLayer")
AssertTosaOneToOneMappingBasicBlock(
basicBlock, inputShape, outputShape, Op_CONV2D, Attribute_ConvAttribute, descriptor, LayerType::Convolution2d);
}
+
TEST_CASE("GetTosaMapping_ElementwiseUnaryLayerRsqrt")
{
TensorInfo inputInfo = TensorInfo({ 2, 2 }, DataType::Float32, 0.0f, 0, true);
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index 38fd01b93c..bf3783cce4 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -42,6 +42,7 @@ bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type,
case LayerType::Output:
return true;
case LayerType::Addition:
+ case LayerType::BatchMatMul:
case LayerType::Multiplication:
case LayerType::Subtraction:
case LayerType::ElementwiseBinary:
@@ -86,9 +87,23 @@ bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type,
}
break;
}
+ case LayerType::FullyConnected:
+ {
+ inputInfos.push_back(&infos[0]); // input
+ outputInfos.push_back(&infos[1]); // output
+ inputInfos.push_back(&infos[2]); // weights
+ auto fullyConnectedDesc = PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor);
+ if(fullyConnectedDesc->m_BiasEnabled)
+ {
+ inputInfos.push_back(&infos[3]); // bias
+ }
+ break;
+ }
case LayerType::ElementwiseUnary:
+ case LayerType::Pad:
case LayerType::Pooling2d:
case LayerType::Quantize:
+ case LayerType::Reduce:
case LayerType::Reshape:
case LayerType::Resize:
case LayerType::Slice:
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index 09a3d44c02..4f6ca9c6f8 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -7,14 +7,18 @@
#include "backendsCommon/test/ActivationEndToEndTestImpl.hpp"
#include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
+#include "backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/ConcatEndToEndTestImpl.hpp"
#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp"
+#include "backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp"
#include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp"
+#include "backendsCommon/test/PadEndToEndTestImpl.hpp"
#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/QuantizationEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ReduceEndToEndTestImpl.hpp"
#include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
#include "backendsCommon/test/ResizeEndToEndTestImpl.hpp"
#include "backendsCommon/test/SliceEndToEndTestImpl.hpp"
@@ -30,25 +34,46 @@ TEST_SUITE("TosaRefEndToEnd")
static std::vector<BackendId> tosaDefaultBackends = { "TosaRef" };
// Activation
-//LeakyRelu
+// LeakyRelu
TEST_CASE("TosaRefLeakyReluActivationFloat32")
{
ActivationEndToEndTest<DataType::Float32>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
}
+
TEST_CASE("TosaRefLeakyReluActivationFloat16")
{
ActivationEndToEndTest<DataType::Float16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.3f, 5, 0.01f);
}
+
+TEST_CASE("TosaRefLeakyReluActivationInt32")
+{
+ ActivationEndToEndTest<DataType::Signed32>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.15f, 0, 0.01f);
+}
+
+TEST_CASE("TosaRefLeakyReluActivationInt16")
+{
+ ActivationEndToEndTest<DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.35f, 0, 0.01f);
+}
+
TEST_CASE("TosaRefLeakyReluActivationInt8")
{
ActivationEndToEndTest<DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.6f, 7, 0.01f);
}
-TEST_CASE("TosaRefLeakyReluActivationInt16")
+
+TEST_CASE("UNSUPPORTED_ActivationUInt8")
{
- ActivationEndToEndTest<DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 0.15f, 0, 0.01f);
+ try
+ {
+ ActivationEndToEndTest<DataType::QAsymmU8>(tosaDefaultBackends, ActivationFunction::LeakyReLu, 1.f, 0, 0.01f);
+ FAIL("An exception should have been thrown");
+ }
+ catch (armnn::Exception& e)
+ {
+ CHECK_EQ(std::string(e.what()), "Failed to assign a backend to each layer");
+ }
}
-//Relu
+// Relu
TEST_CASE("TosaRefReLuEndToEndTestQAsymmS8")
{
ActivationEndToEndTest<armnn::DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::ReLu);
@@ -69,6 +94,37 @@ TEST_CASE("TosaRefReLuEndToEndTestQSymmS16")
ActivationEndToEndTest<armnn::DataType::QSymmS16>(tosaDefaultBackends, ActivationFunction::ReLu);
}
+// Gelu
+TEST_CASE("TosaRefGeluEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(tosaDefaultBackends, ActivationFunction::Gelu);
+}
+
+// BoundedRelu
+TEST_CASE("TosaRefBoundedReLuEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<armnn::DataType::Float32>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestFloat16")
+{
+ ActivationEndToEndTest<armnn::DataType::Float16>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestQAsymmS8")
+{
+ ActivationEndToEndTest<armnn::DataType::QAsymmS8>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
+TEST_CASE("TosaRefBoundedReLuEndToEndTestQSymmS16")
+{
+ ActivationEndToEndTest<armnn::DataType::QSymmS16>(
+ tosaDefaultBackends, ActivationFunction::BoundedReLu, 1.0f, 0, 6.0f, 0.0f);
+}
+
// Addition
TEST_CASE("TosaRefAdditionEndtoEndTestFloat32")
{
@@ -85,6 +141,83 @@ TEST_CASE("TosaRefAdditionEndtoEndTestFloat16")
AdditionEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
}
+// BatchMatMul
+
+TEST_CASE("TosaRefBatchMatMulEndToEndFloat32Test")
+{
+ BatchMatMulEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulEndToEndInt8Test")
+{
+ BatchMatMulEndToEnd<armnn::DataType::QSymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulEndToEndInt16Test")
+{
+ BatchMatMulEndToEnd<armnn::DataType::QSymmS16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulNoTransposeEndToEndFloat32Test")
+{
+ BatchMatMulNoTransposeEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulNoTransposeEndToEndInt8Test")
+{
+ BatchMatMulNoTransposeEndToEnd<armnn::DataType::QSymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulNoTransposeEndToEndInt16Test")
+{
+ BatchMatMulNoTransposeEndToEnd<armnn::DataType::QSymmS16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulSimple4DEndToEndFloat32Test")
+{
+ BatchMatMulSimple4DEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulSimple4DEndToEndInt8Test")
+{
+ BatchMatMulSimple4DEndToEnd<armnn::DataType::QAsymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulSimple4DEndToEndInt16Test")
+{
+ BatchMatMulSimple4DEndToEnd<armnn::DataType::QSymmS16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulNotSquareEndToEndFloat32Test")
+{
+ BatchMatMulNotSquareEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulNotSquareEndToEndInt8Test")
+{
+ BatchMatMulNotSquareEndToEnd<armnn::DataType::QAsymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMulNotSquareEndToEndInt16Test")
+{
+ BatchMatMulNotSquareEndToEnd<armnn::DataType::QSymmS16>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMul4DEndToEndFloat32Test")
+{
+ BatchMatMul4DEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMul4DEndToEndInt8Test")
+{
+ BatchMatMul4DEndToEnd<armnn::DataType::QAsymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefBatchMatMul4DEndToEndInt16Test")
+{
+ BatchMatMul4DEndToEnd<armnn::DataType::QSymmS16>(tosaDefaultBackends);
+}
+
// Concat
TEST_CASE("TosaRefConcatEndToEndDim0TestFloat32")
{
@@ -158,6 +291,12 @@ TEST_CASE("TosaRefDepthwiseConv2dEndtoEndTestInt8")
armnn::DataType::Signed32>(tosaDefaultBackends, armnn::DataLayout::NHWC);
}
+TEST_CASE("TosaRefDepthwiseConv2dEndtoEndTestInt8BiasDisabled")
+{
+ DepthwiseConvolution2dEndToEnd<armnn::DataType::QSymmS8,
+ armnn::DataType::Signed32>(tosaDefaultBackends, armnn::DataLayout::NHWC, false);
+}
+
// Elementwise Binary
//Add
TEST_CASE("TosaRefAddEndtoEndTestInt32")
@@ -211,6 +350,75 @@ TEST_CASE("TosaRefSubEndtoEndTestInt8")
armnn::BinaryOperation::Sub);
}
+// FullyConnected
+TEST_CASE("TosaRefFullyConnectedEndToEndTestFloat32")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends, true);
+}
+
+TEST_CASE("TosaRefFullyConnectedEndToEndTestNoBiasFloat32")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends, false);
+}
+
+TEST_CASE("TosaRefFullyConnectedEndToEndTestInt8")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QAsymmS8,
+ armnn::DataType::QAsymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmS8>(tosaDefaultBackends, true);
+}
+
+TEST_CASE("TosaRefFullyConnectedEndToEndTestNoBiasInt8")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QAsymmS8,
+ armnn::DataType::QAsymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QAsymmS8>(tosaDefaultBackends, false);
+}
+
+TEST_CASE("TosaRefFullyConnectedEndToEndTestInt8Symm")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QSymmS8,
+ armnn::DataType::QSymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QSymmS8>(tosaDefaultBackends, true);
+}
+
+TEST_CASE("TosaRefFullyConnectedEndToEndTestNoBiasInt8Symm")
+{
+ FullyConnectedConstantWeightsAndBiasEndToEnd<armnn::DataType::QSymmS8,
+ armnn::DataType::QSymmS8,
+ armnn::DataType::Signed32,
+ armnn::DataType::QSymmS8>(tosaDefaultBackends, false);
+}
+
+// Pad
+TEST_CASE("TosaRefPadEndToEndFloat32Test")
+{
+ PadEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefPadEndToEndInt8Test")
+{
+ PadEndToEnd<armnn::DataType::QSymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefPad4dEndToEndFloat32Test")
+{
+ Pad4dEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefPad4dEndToEndInt8Test")
+{
+ Pad4dEndToEnd<armnn::DataType::QSymmS8>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefPad4dEndToEndInt32Test")
+{
+ Pad4dEndToEnd<armnn::DataType::Signed32>(tosaDefaultBackends);
+}
+
// Pooling
// Average Pool 2D
TEST_CASE("TosaRefAvgPool2DEndtoEndTestFloat32")
@@ -285,6 +493,128 @@ TEST_CASE("TosaRefQuantizeFromFloat16ToInt32")
QuantizationEndToEndFloat16<DataType::Signed32>(tosaDefaultBackends);
}
+// Reduce
+// Reduce Sum
+TEST_CASE("TosaRefReduce2dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd2d<DataType::Signed32>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Signed32>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd2d<DataType::Float16>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float16>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd2d<DataType::Float32>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::Float32>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestInt8")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce2dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd2d<DataType::QAsymmS8>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Signed32>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd3d<DataType::Float16>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float16>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd3d<DataType::Float32>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::Float32>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestInt8")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce3dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd3d<DataType::QAsymmS8>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestSigned32")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestSigned32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Signed32>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestFloat16")
+{
+ ReduceEndToEnd4d<DataType::Float16>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestFloat16WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float16>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestFloat32")
+{
+ ReduceEndToEnd4d<DataType::Float32>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestFloat32WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::Float32>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestInt8")
+{
+ ReduceEndToEnd4d<DataType::QAsymmS8>(tosaDefaultBackends, ReduceOperation::Sum);
+}
+
+TEST_CASE("TosaRefReduce4dEndtoEndTestInt8WithKeepDims")
+{
+ ReduceEndToEnd4d<DataType::QAsymmS8>(tosaDefaultBackends, ReduceOperation::Sum, true);
+}
+
// Reshape
TEST_CASE("TosaRefReshapeEndtoEndTestFloat32")
{
@@ -301,12 +631,39 @@ TEST_CASE("TosaRefReshapeEndtoEndTestFloat16")
ReshapeEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
}
+// Rsqrt
TEST_CASE("TosaRefRsqrtEndtoEndTestFloat32")
{
ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends,
UnaryOperation::Rsqrt);
}
+// Exp
+TEST_CASE("TosaRefExpEndToEndTestFloat32")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends,
+ UnaryOperation::Exp);
+}
+
+TEST_CASE("TosaRefExpEndToEndTestInt8")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmS8>(tosaDefaultBackends,
+ UnaryOperation::Exp);
+}
+
+// Log
+TEST_CASE("TosaRefLogEndToEndTestFloat32")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends,
+ UnaryOperation::Log);
+}
+
+TEST_CASE("TosaRefLogEndToEndTestSint8")
+{
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmS8>(tosaDefaultBackends,
+ UnaryOperation::Log);
+}
+
// Resize
TEST_CASE("TosaRefResizeNearestNeighborEndToEndFloat32AlignCornersNhwcTest")
{