Diffstat (limited to 'src/backends/backendsCommon/test')
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp | 242
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp | 364
2 files changed, 465 insertions(+), 141 deletions(-)
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 6754106c49..2201499b3a 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -2641,6 +2641,103 @@ LayerTestResult<float,4> AdditionTest(
return ret;
}
+LayerTestResult<float, 5> Addition5dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int depth = 2;
+ unsigned int batchSize = 2;
+ unsigned int channels = 2;
+ unsigned int height = 2;
+ unsigned int width = 3;
+
+ armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int shape[] = {depth, batchSize, channels, height, width};
+
+ inputTensorInfo1 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
+ inputTensorInfo2 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
+ outputTensorInfo = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
+
+
+ auto input1 = MakeTensor<float, 5>(inputTensorInfo1, std::vector<float>(
+ {
+ 2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
+ 2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
+
+ 2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
+ 0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
+
+
+ 1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
+ 1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
+
+ 0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
+ 0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
+
+ }));
+
+ auto input2 = MakeTensor<float, 5>(inputTensorInfo2, std::vector<float>(
+ {
+ 4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
+ 1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
+
+ 4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
+ 0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
+
+
+ 0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
+ 2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
+
+ 3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
+ 2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
+ }));
+
+ LayerTestResult<float, 5> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<float, 5>(outputTensorInfo, std::vector<float>(
+ {
+ 7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
+ 4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
+
+ 7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
+ 0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
+
+
+ 1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
+ 3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
+
+ 4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
+ 2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
+ }));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::AdditionQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+ AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+ inputHandle1->Allocate();
+ inputHandle2->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0][0], outputHandle.get());
+
+ return ret;
+}
+
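Note: like the existing 4D AdditionTest, a 5D case such as this would normally be surfaced per backend through the auto-test macros. A minimal sketch, assuming the same registration pattern (the macro and file location are assumptions, not part of this diff):

    // e.g. in src/backends/reference/test/RefLayerTests.cpp (assumed location)
    ARMNN_AUTO_TEST_CASE(Addition5d, Addition5dTest)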
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
armnn::IWorkloadFactory& workloadFactory,
@@ -4103,25 +4200,25 @@ LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
}
namespace {
-LayerTestResult<float,4> MultiplicationTestHelper(
+template<std::size_t NumDims>
+LayerTestResult<float,NumDims> MultiplicationTestHelper(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const unsigned int shape0[4],
+ const unsigned int shape0[NumDims],
const std::vector<float> & values0,
- const unsigned int shape1[4],
+ const unsigned int shape1[NumDims],
const std::vector<float> & values1,
- const unsigned int outShape[4],
+ const unsigned int outShape[NumDims],
const std::vector<float> & outValues)
{
- const uint32_t dimensionCount = 4;
- armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
- armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
- armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
+ armnn::TensorInfo inputTensorInfo0{NumDims, shape0, armnn::DataType::Float32};
+ armnn::TensorInfo inputTensorInfo1{NumDims, shape1, armnn::DataType::Float32};
+ armnn::TensorInfo outputTensorInfo{NumDims, outShape, armnn::DataType::Float32};
- auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
+ auto input0 = MakeTensor<float, NumDims>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<float, NumDims>(inputTensorInfo1, values1);
- LayerTestResult<float,4> ret(outputTensorInfo);
+ LayerTestResult<float,NumDims> ret(outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
@@ -4139,15 +4236,15 @@ LayerTestResult<float,4> MultiplicationTestHelper(
inputHandle1->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
+ CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
+ ret.outputExpected = MakeTensor<float, NumDims>(outputTensorInfo, outValues);
return ret;
}
} // anonymous namespace
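With the helper now templated on NumDims, the tensor rank is a parameter rather than a hard-coded 4. A minimal sketch of a 2-D instantiation (illustrative values only; workloadFactory and memoryManager are assumed in scope, and the anonymous namespace means this only compiles inside LayerTests.cpp):

    unsigned int shape[] = { 2, 3 };
    std::vector<float> in0 = { 1.f, 2.f, 3.f, 4.f,  5.f,  6.f };
    std::vector<float> in1 = { 2.f, 2.f, 2.f, 2.f,  2.f,  2.f };
    std::vector<float> out = { 2.f, 4.f, 6.f, 8.f, 10.f, 12.f };
    LayerTestResult<float, 2> ret = MultiplicationTestHelper<2>(
        workloadFactory, memoryManager, shape, in0, shape, in1, shape, out);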
@@ -4176,14 +4273,81 @@ LayerTestResult<float,4> MultiplicationTest(
2, 2, 2, 2, 6, 6, 6, 6,
12, 12, 12, 12, 20, 20, 20, 20 });
- return MultiplicationTestHelper(workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return MultiplicationTestHelper<4>(workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
+}
+
+LayerTestResult<float,5> Multiplication5dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int width = 3;
+ const unsigned int height = 2;
+ const unsigned int channelCount = 2;
+ const unsigned int batchSize = 2;
+ const unsigned int depth = 2;
+
+ unsigned int shape[] = { depth, batchSize, channelCount, height, width };
+
+ std::vector<float> input0({
+ 1.80f, 0.20f, 2.30f, 1.30f, 2.10f, 1.00f,
+ 2.60f, 0.60f, 2.10f, 2.30f, 2.30f, 2.00f,
+
+ 2.50f, 1.00f, 2.90f, 3.10f, 1.50f, 2.40f,
+ 2.80f, 1.10f, 1.00f, 3.20f, 1.00f, 2.30f,
+
+
+ 0.30f, 2.20f, 1.00f, 0.20f, 1.60f, 1.40f,
+ 0.80f, 3.20f, 0.10f, 0.10f, 3.10f, 2.10f,
+
+ 1.50f, 2.40f, 1.40f, 0.70f, 2.40f, 1.40f,
+ 1.60f, 1.20f, 1.90f, 0.80f, 0.00f, 0.10f,
+ });
+
+ std::vector<float> input1({
+ 0.70f, 1.00f, 2.90f, 2.20f, 3.10f, 2.80f,
+ 1.80f, 2.00f, 0.50f, 2.30f, 1.20f, 2.70f,
+
+ 2.40f, 0.20f, 3.20f, 1.60f, 0.20f, 2.50f,
+ 2.30f, 0.70f, 2.70f, 1.80f, 2.90f, 2.70f,
+
+
+ 3.20f, 3.20f, 0.70f, 1.90f, 2.70f, 2.50f,
+ 2.40f, 0.90f, 2.30f, 1.80f, 2.50f, 2.00f,
+
+ 1.60f, 2.20f, 1.60f, 2.00f, 0.30f, 3.20f,
+ 0.40f, 3.00f, 2.60f, 0.30f, 0.00f, 2.50f,
+ });
+
+ std::vector<float> output({
+ 1.26f, 0.20f, 6.67f, 2.86f, 6.51f, 2.80f,
+ 4.68f, 1.20f, 1.05f, 5.29f, 2.76f, 5.40f,
+
+ 6.00f, 0.20f, 9.28f, 4.96f, 0.30f, 6.00f,
+ 6.44f, 0.77f, 2.70f, 5.76f, 2.90f, 6.21f,
+
+
+ 0.96f, 7.04f, 0.70f, 0.38f, 4.32f, 3.50f,
+ 1.92f, 2.88f, 0.23f, 0.18f, 7.75f, 4.20f,
+
+ 2.40f, 5.28f, 2.24f, 1.40f, 0.72f, 4.48f,
+ 0.64f, 3.60f, 4.94f, 0.24f, 0.00f, 0.25f,
+ });
+
+ return MultiplicationTestHelper<5>(workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
@@ -4198,14 +4362,14 @@ LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
- return MultiplicationTestHelper(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return MultiplicationTestHelper<4>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
@@ -4226,14 +4390,14 @@ LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
7, 16, 9, 20, 11, 24,
13, 28, 15, 32, 17, 36});
- return MultiplicationTestHelper(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return MultiplicationTestHelper<4>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float,4> CompareMultiplicationTest(
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 0fe5d09cd4..df33aa192e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -539,6 +539,11 @@ LayerTestResult<T, 4> SimpleReshapeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> Reshape5dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> SimpleFloorTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -712,6 +717,10 @@ LayerTestResult<float, 4> AdditionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 5> Addition5dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -764,6 +773,10 @@ LayerTestResult<float, 4> MultiplicationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 5> Multiplication5dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2084,6 +2097,37 @@ LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> ConvertToDataType(const std::vector<float>& input,
+ const armnn::TensorInfo& inputTensorInfo)
+{
+ std::vector<T> output(input.size());
+ auto outputTensorInfo = inputTensorInfo;
+ outputTensorInfo.SetDataType(ArmnnType);
+
+ std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
+ armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+
+ for (auto it = input.begin(); it != input.end(); ++it)
+ {
+ rOutputEncoder.Set(*it);
+ ++rOutputEncoder;
+ }
+ return output;
+}
+
+// Utility method to convert a single value to the correct type
+template <typename T>
+T ConvertToDataType(const float& value,
+ const armnn::TensorInfo& tensorInfo)
+{
+ std::vector<T> output(1);
+ std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+ armnn::Encoder<float>& rEncoder = *pEncoder;
+ rEncoder.Set(value);
+ return output[0];
+}
+
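These helpers encode float reference data into whatever T the ArmnnType resolves to, reusing the quantization parameters carried by the TensorInfo. A minimal usage sketch, assuming the QuantisedAsymm8 spelling this version of ArmNN uses for the 8-bit asymmetric type:

    armnn::TensorInfo info({ 1, 4 }, armnn::DataType::QuantisedAsymm8);
    info.SetQuantizationScale(0.1f);
    info.SetQuantizationOffset(63);
    // asymmetric quantization: q = round(v / 0.1f) + 63
    std::vector<uint8_t> q = ConvertToDataType<armnn::DataType::QuantisedAsymm8>(
        { -1.2f, 6.1f, -3.5f, 18.8f }, info);   // yields { 51, 124, 28, 251 }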
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::IWorkloadFactory& workloadFactory,
@@ -2130,35 +2174,75 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
return result;
}
-template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-std::vector<T> ConvertToDataType(const std::vector<float>& input,
- const armnn::TensorInfo& inputTensorInfo)
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> FullyConnectedTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled)
{
- std::vector<T> output(input.size());
- auto outputTensorInfo = inputTensorInfo;
- outputTensorInfo.SetDataType(ArmnnType);
+ constexpr static unsigned int inputWidth = 3u;
+ constexpr static unsigned int inputHeight = 2u;
+ constexpr static unsigned int inputChannels = 1u;
- std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
- armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+ constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
- for (auto it = input.begin(); it != input.end(); ++it)
+ constexpr static unsigned int outputChannels = 2u;
+
+ armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(0.1f);
+ inputTensorInfo.SetQuantizationOffset(63);
+
+ armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(5.f);
+ outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+ armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
+ weightsDesc.SetQuantizationScale(0.2f);
+ weightsDesc.SetQuantizationOffset(93);
+
+ armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
+ biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+ biasesDesc.SetQuantizationOffset(0);
+
+ LayerTestResult<T, 2> result(outputTensorInfo);
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ {
+ -1.2f, 6.1f, -3.5f,
+ 18.8f, -5.5f, 2.9f
+ },
+ inputTensorInfo));
+
+ auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+ {
+ -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
+ 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
+ },
+ weightsDesc));
+
+ auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+ result = SimpleFullyConnectedTestImpl<T>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo, outputTensorInfo,
+ weightsDesc, biasesDesc,
+ weights, bias, input,
+ biasEnabled, true
+ );
+
+ if (biasEnabled)
{
- rOutputEncoder.Set(*it);
- ++rOutputEncoder;
+ result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+ ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+ }
+ else
+ {
+ result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+ ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
}
- return output;
-}
-// Utility method to convert a single value to the correct type
-template <typename T>
-T ConvertToDataType(const float& value,
- const armnn::TensorInfo& tensorInfo)
-{
- std::vector<T> output(1);
- std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
- armnn::Encoder<float>& rEncoder = *pEncoder;
- rEncoder.Set(value);
- return output[0];
+ return result;
}
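The expected values in the two branches above follow from real-valued arithmetic plus quantization rounding. A standalone check of the second output channel (plain C++, not ArmNN API; the small mismatch, 110.88f vs 110.f, is quantization error):

    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::vector<float> input { -1.2f,  6.1f,  -3.5f, 18.8f,  -5.5f,  2.9f };
        std::vector<float> row1  { 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f };
        // dot(weights row 1, input) in real terms
        float acc = std::inner_product(input.begin(), input.end(), row1.begin(), 0.f);
        // bias scale = inputScale * weightScale = 0.1f * 0.2f
        float bias1 = 67500 * 0.02f;
        std::cout << acc << ' ' << acc + bias1 << '\n';   // ~110.88 and ~1460.88
        return 0;
    }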
template<armnn::DataType ArmnnType, typename T>
@@ -2354,8 +2438,8 @@ LayerTestResult<T, 2> RsqrtNegativeTest(
inputValues, expectedOutputValues);
}
-template<typename T>
-LayerTestResult<T, 4> SimpleReshapeTestImpl(
+template<typename T, size_t NumDims>
+LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::TensorInfo inputTensorInfo,
@@ -2363,10 +2447,10 @@ LayerTestResult<T, 4> SimpleReshapeTestImpl(
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+ auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+ LayerTestResult<T, NumDims> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, outputExpectedData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -2381,87 +2465,16 @@ LayerTestResult<T, 4> SimpleReshapeTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.origin());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
return ret;
}
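The switch from &input[0][0][0][0] to input.origin() is what lets one implementation serve every rank. A minimal sketch of why, assuming boost::multi_array backs MakeTensor here:

    #include <boost/multi_array.hpp>

    // origin() is a flat pointer to the first element, independent of rank,
    // so no rank-specific chain of [0] indices is needed.
    boost::multi_array<float, 5> t(boost::extents[2][2][2][2][3]);
    float* flat = t.origin();   // same address as &t[0][0][0][0][0]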
template<armnn::DataType ArmnnType, typename T>
-LayerTestResult<T, 2> FullyConnectedTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- bool biasEnabled)
-{
- constexpr static unsigned int inputWidth = 3u;
- constexpr static unsigned int inputHeight = 2u;
- constexpr static unsigned int inputChannels = 1u;
-
- constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
-
- constexpr static unsigned int outputChannels = 2u;
-
- armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
- inputTensorInfo.SetQuantizationScale(0.1f);
- inputTensorInfo.SetQuantizationOffset(63);
-
- armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
- outputTensorInfo.SetQuantizationScale(5.f);
- outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
-
- armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
- weightsDesc.SetQuantizationScale(0.2f);
- weightsDesc.SetQuantizationOffset(93);
-
- armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
- biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
- biasesDesc.SetQuantizationOffset(0);
-
- LayerTestResult<T, 2> result(outputTensorInfo);
-
- auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
- {
- -1.2f, 6.1f, -3.5f,
- 18.8f, -5.5f, 2.9f
- },
- inputTensorInfo));
-
- auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
- {
- -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
- 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
- },
- weightsDesc));
-
- auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
-
- result = SimpleFullyConnectedTestImpl<T>(
- workloadFactory,
- memoryManager,
- inputTensorInfo, outputTensorInfo,
- weightsDesc, biasesDesc,
- weights, bias, input,
- biasEnabled, true
- );
-
- if (biasEnabled)
- {
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
- }
- else
- {
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
- }
-
- return result;
-}
-
-template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> SimpleReshapeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -2509,7 +2522,69 @@ LayerTestResult<T, 4> SimpleReshapeTest(
},
outputTensorInfo);
- return SimpleReshapeTestImpl<T>(
+ return SimpleReshapeTestImpl<T, 4>(
+ workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 5> Reshape5dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 2, 2, 8, 1, 1 };
+ unsigned int outputShape[] = { 2, 2, 2, 2, 2 };
+
+ inputTensorInfo = armnn::TensorInfo(5, inputShape, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ outputTensorInfo = armnn::TensorInfo(5, outputShape, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(1.0f);
+
+ auto input = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
+ 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+
+ 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f,
+ 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f,
+ },
+ inputTensorInfo);
+
+ auto outputExpected = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f,
+ 2.0f, 3.0f,
+
+ 4.0f, 5.0f,
+ 6.0f, 7.0f,
+
+
+ 8.0f, 9.0f,
+ 10.0f, 11.0f,
+
+ 12.0f, 13.0f,
+ 14.0f, 15.0f,
+
+
+
+ 16.0f, 17.0f,
+ 18.0f, 19.0f,
+
+ 20.0f, 21.0f,
+ 22.0f, 23.0f,
+
+
+ 24.0f, 25.0f,
+ 26.0f, 27.0f,
+
+ 28.0f, 29.0f,
+ 30.0f, 31.0f,
+ },
+ outputTensorInfo);
+
+ return SimpleReshapeTestImpl<T, 5>(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
}
@@ -4746,3 +4821,88 @@ LayerTestResult<T, 3> Stack3dOutput1Axis3InputTest(
outputExpectedData
);
}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 5> Stack5dOutputTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo ({ 2, 2, 2, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 2, 2, 2, 2, 3 }, ArmnnType);
+
+ std::vector<std::vector<T>> inputData;
+
+ inputData.push_back(
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+
+ 13, 14, 15,
+ 16, 17, 18,
+
+ 19, 20, 21,
+ 22, 23, 24
+ });
+
+ inputData.push_back(
+ {
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36,
+
+
+ 37, 38, 39,
+ 40, 41, 42,
+
+ 43, 44, 45,
+ 46, 47, 48
+ });
+
+ std::vector<T> outputExpectedData =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+
+ 7, 8, 9,
+ 10, 11, 12,
+
+
+ 25, 26, 27,
+ 28, 29, 30,
+
+ 31, 32, 33,
+ 34, 35, 36,
+
+
+
+ 13, 14, 15,
+ 16, 17, 18,
+
+ 19, 20, 21,
+ 22, 23, 24,
+
+
+ 37, 38, 39,
+ 40, 41, 42,
+
+ 43, 44, 45,
+ 46, 47, 48
+
+ };
+
+ return StackTestHelper<ArmnnType, T, 5>(
+ workloadFactory,
+ memoryManager,
+ inputTensorInfo,
+ outputTensorInfo,
+ 1U,
+ inputData,
+ outputExpectedData
+ );
+}
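For reference, the interleaving in outputExpectedData follows from stacking along axis 1: batch b of input i lands at output[b][i][...], which is why batch 0 of input 0 (values 1-12) is followed by batch 0 of input 1 (values 25-36). A standalone index check (plain C++, not ArmNN API):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t N = 2, C = 2, H = 2, W = 3;   // inputs are [2,2,2,3]
        // flat offset of output[b][i][c][h][w] in the stacked [2,2,2,2,3] tensor
        auto outIdx = [&](std::size_t b, std::size_t i, std::size_t c,
                          std::size_t h, std::size_t w)
        { return (((b * N + i) * C + c) * H + h) * W + w; };
        // value 25 is the first element of input 1, batch 0; it must land
        // right after the 12 elements of input 0's batch 0.
        assert(outIdx(0, 1, 0, 0, 0) == 12);
        return 0;
    }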