Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                         |  15
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                         |  11
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                      |  11
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                      |   1
-rw-r--r--  src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp   | 167
-rw-r--r--  src/backends/backendsCommon/test/DataLayoutUtils.hpp                 |  24
-rw-r--r--  src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp  |   6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp       | 143
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp       |  60
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp                     |  31
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                        | 130
-rw-r--r--  src/backends/reference/workloads/Conv3dImpl.cpp                      |  47
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.cpp        |  33
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.hpp        |   4
14 files changed, 540 insertions(+), 143 deletions(-)
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 27b59ea3a6..2716c827af 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1320,7 +1320,12 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
{
const std::string descriptorName{"Convolution3dQueueDescriptor"};
- ValidateNumInputs(workloadInfo, descriptorName, 1);
+ uint32_t numInputs = 2;
+ if (m_Parameters.m_BiasEnabled)
+ {
+ numInputs = 3;
+ }
+ ValidateNumInputs(workloadInfo, descriptorName, numInputs);
ValidateNumOutputs(workloadInfo, descriptorName, 1);
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
@@ -1329,9 +1334,7 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
- ValidatePointer(m_Weight, descriptorName, "weight");
-
- const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
+ const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
@@ -1339,9 +1342,7 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
Optional<TensorInfo> optionalBiasTensorInfo;
if (m_Parameters.m_BiasEnabled)
{
- ValidatePointer(m_Bias, descriptorName, "bias");
-
- optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
+ optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
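For reference, a minimal sketch of how a caller now assembles the descriptor and WorkloadInfo so that this Validate() passes: weights and (when enabled) bias are appended as extra workload inputs instead of being set on the descriptor. The handle and TensorInfo names below are placeholders; the pattern mirrors the Conv3dTestImpl.cpp changes later in this patch.

    // Sketch: weights on input slot 1, bias on input slot 2 when m_BiasEnabled is true.
    armnn::Convolution3dQueueDescriptor data;
    data.m_Parameters.m_BiasEnabled = true;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());     // slot 0: input
    AddInputToWorkload(data, info, weightsTensorInfo, weightsHandle.get()); // slot 1: weights
    AddInputToWorkload(data, info, biasTensorInfo, biasHandle.get());       // slot 2: bias
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.Validate(info); // expects 3 inputs with bias enabled, 2 without
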
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 29d39d14a9..4e56aaf823 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -208,18 +208,9 @@ struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2
void Validate(const WorkloadInfo& workloadInfo) const;
};
-// Convolution 2D layer workload data.
+// Convolution 3D layer workload data.
struct Convolution3dQueueDescriptor : QueueDescriptorWithParameters<Convolution3dDescriptor>
{
- Convolution3dQueueDescriptor()
- : m_Weight(nullptr)
- , m_Bias(nullptr)
- {
- }
-
- const ConstTensorHandle* m_Weight;
- const ConstTensorHandle* m_Bias;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3b7f3a0f1f..55ce3554f9 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -250,7 +250,11 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
+
+ ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
+ "Convolution3dLayer: Weights should be connected as a Constant Layer.");
+ const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
+ dataType);
const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
@@ -258,14 +262,15 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
+ biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
+ GetBiasTypeFromWeightsType(dataType));
}
result = layerSupportObject.IsConvolution3dSupported(
input,
output,
descriptor,
- OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
+ weights,
biases,
reason);
break;
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index e3221c5ae4..b90407fd7c 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -13,6 +13,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
ChannelShuffleEndToEndTestImpl.hpp
ComparisonEndToEndTestImpl.hpp
CompatibilityTests.cpp
+ Convolution3dEndToEndTestImpl.hpp
CustomMemoryOptimizerStrategyTests.cpp
DefaultAsyncExecuteTest.cpp
DepthToSpaceEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..33bf9a180b
--- /dev/null
+++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
@@ -0,0 +1,167 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include "QuantizeHelper.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateConvolution3dNetwork(const armnn::Convolution3dDescriptor& descriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& weightsInfo,
+ const armnn::TensorInfo& biasInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ConstTensor& weights,
+ const armnn::ConstTensor& biases)
+{
+ using namespace armnn;
+
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* input = network->AddInputLayer(0, "input");
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+ armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+ IConnectableLayer* convolution3d = network->AddConvolution3dLayer(descriptor, "convolution3d");
+ IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+ Connect(input, convolution3d, inputInfo, 0, 0);
+ Connect(weightsLayer, convolution3d, weightsInfo, 0, 1);
+ Connect(biasLayer, convolution3d, biasInfo, 0, 2);
+ Connect(convolution3d, output, outputInfo, 0, 0);
+
+ return network;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void Convolution3dEndToEnd(const std::vector<armnn::BackendId>& backends,
+ armnn::DataLayout dataLayout)
+{
+ using namespace armnn;
+ using T = ResolveType<ArmnnType>;
+ using BT = ResolveType<ArmnnBType>;
+
+ const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
+ const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
+
+ TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset);
+ TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
+ TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
+
+ std::vector<float> inputData =
+ {
+ 0.0f, 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+ 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
+ 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f,
+
+ 25.0f, 26.0f, 27.0f, 28.0f, 29.0f,
+ 30.0f, 31.0f, 32.0f, 33.0f, 34.0f,
+ 35.0f, 36.0f, 37.0f, 38.0f, 39.0f,
+ 40.0f, 41.0f, 42.0f, 43.0f, 44.0f,
+ 45.0f, 46.0f, 47.0f, 48.0f, 49.0f,
+
+ 50.0f, 51.0f, 52.0f, 53.0f, 54.0f,
+ 55.0f, 56.0f, 57.0f, 58.0f, 59.0f,
+ 60.0f, 61.0f, 62.0f, 63.0f, 64.0f,
+ 65.0f, 66.0f, 67.0f, 68.0f, 69.0f,
+ 70.0f, 71.0f, 72.0f, 73.0f, 74.0f,
+
+ 75.0f, 76.0f, 77.0f, 78.0f, 79.0f,
+ 80.0f, 81.0f, 82.0f, 83.0f, 84.0f,
+ 85.0f, 86.0f, 87.0f, 88.0f, 89.0f,
+ 90.0f, 91.0f, 92.0f, 93.0f, 94.0f,
+ 95.0f, 96.0f, 97.0f, 98.0f, 99.0f,
+
+ 100.0f, 101.0f, 102.0f, 103.0f, 104.0f,
+ 105.0f, 106.0f, 107.0f, 108.0f, 109.0f,
+ 110.0f, 111.0f, 112.0f, 113.0f, 114.0f,
+ 115.0f, 116.0f, 117.0f, 118.0f, 119.0f,
+ 120.0f, 121.0f, 122.0f, 123.0f, 124.0f
+ };
+
+ std::vector<float> weightsData =
+ {
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+
+ 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f,
+
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+ };
+
+ std::vector<float> biasesData = { 1.f };
+
+ std::vector<float> expectedOutputData =
+ {
+ 559.0f, 595.0f,
+
+ 739.0f, 775.0f,
+
+ 1459.0f, 1495.0f,
+
+ 1639.0f, 1675.0f,
+ };
+
+ Convolution3dDescriptor descriptor;
+ descriptor.m_PadLeft = 0;
+ descriptor.m_PadRight = 0;
+ descriptor.m_PadTop = 0;
+ descriptor.m_PadBottom = 0;
+ descriptor.m_PadFront = 0;
+ descriptor.m_PadBack = 0;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_StrideZ = 2;
+ descriptor.m_BiasEnabled = true;
+ descriptor.m_DataLayout = dataLayout;
+
+ // Permute input and output if NCDHW.
+ if (dataLayout == DataLayout::NCDHW)
+ {
+ PermuteTensorNdhwcToNcdhw(inputInfo, inputData);
+ PermuteTensorNdhwcToNcdhw(outputInfo, expectedOutputData);
+ }
+
+ // Quantize data
+ std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+ std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+
+ std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
+
+ ConstTensor weights(weightsInfo, qWeightsData);
+ ConstTensor biases(biasesInfo, qBiasesData);
+
+ INetworkPtr network = CreateConvolution3dNetwork(descriptor,
+ inputInfo,
+ weightsInfo,
+ biasesInfo,
+ outputInfo,
+ weights,
+ biases);
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+ { { 0, qInputData } },
+ { { 0, qExpectedOutputData } },
+ backends);
+}
diff --git a/src/backends/backendsCommon/test/DataLayoutUtils.hpp b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
index 9411212f4f..89b3900979 100644
--- a/src/backends/backendsCommon/test/DataLayoutUtils.hpp
+++ b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
@@ -34,3 +34,27 @@ void PermuteTensorNhwcToNchw(armnn::TensorInfo& tensorInfo, std::vector<T>& tens
tensorData = tmp;
}
+
+template<typename T>
+void PermuteTensorNdhwcToNcdhw(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+ const armnn::PermutationVector ndhwcToNcdhw = { 0, 2, 3, 4, 1 };
+
+ tensorInfo = armnnUtils::Permuted(tensorInfo, ndhwcToNcdhw);
+
+ std::vector<T> tmp(tensorData.size());
+ armnnUtils::Permute(tensorInfo.GetShape(), ndhwcToNcdhw, tensorData.data(), tmp.data(), sizeof(T));
+ tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNcdhwToNdhwc(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+ const armnn::PermutationVector ncdhwToNdhwc = { 0, 4, 1, 2, 3 };
+
+ tensorInfo = armnnUtils::Permuted(tensorInfo, ncdhwToNdhwc);
+
+ std::vector<T> tmp(tensorData.size());
+ armnnUtils::Permute(tensorInfo.GetShape(), ncdhwToNdhwc, tensorData.data(), tmp.data(), sizeof(T));
+ tensorData = tmp;
+}
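A brief usage sketch for the new helpers (shape and values chosen purely for illustration): permute a 5-D NDHWC TensorInfo and its backing data into NCDHW and back again.

    // Sketch: round-trip a { 1, 2, 2, 2, 1 } NDHWC tensor through NCDHW.
    armnn::TensorInfo info({ 1, 2, 2, 2, 1 }, armnn::DataType::Float32);
    std::vector<float> data = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f };

    PermuteTensorNdhwcToNcdhw(info, data); // shape becomes { 1, 1, 2, 2, 2 }
    // ... run an NCDHW-layout workload on the permuted data ...
    PermuteTensorNcdhwToNdhwc(info, data); // shape restored to { 1, 2, 2, 2, 1 }
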
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index c3a6aa1a3c..f9bdfde622 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -407,7 +407,7 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
}
catch (const LayerValidationException& exc)
{
- CHECK(strcmp(exc.what(), "FullyConnected layer weights not set: Input slot(s) 1 not connected "
+ CHECK(strcmp(exc.what(), "Fully_Connected layer weights not set: Input slot(s) 1 not connected "
"to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
}
}
@@ -434,7 +434,7 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
}
catch (const LayerValidationException& exc)
{
- CHECK(strcmp(exc.what(), "FullyConnected layer bias not set: Input slot(s) 2 not connected "
+ CHECK(strcmp(exc.what(), "Fully_Connected layer bias not set: Input slot(s) 2 not connected "
"to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
}
}
@@ -457,7 +457,7 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
}
catch (const LayerValidationException& exc)
{
- CHECK(strcmp(exc.what(), "FullyConnected layer weights and bias not set: Input slot(s) 1 & 2 not "
+ CHECK(strcmp(exc.what(), "Fully_Connected layer weights and bias not set: Input slot(s) 1 & 2 not "
"connected to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
}
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index 259272d996..1406ab039b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/TensorHandle.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -228,23 +229,20 @@ LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset());
}
+ // Permute input and output if data layout is NCDHW.
+ if (dataLayout == armnn::DataLayout::NCDHW)
+ {
+ PermuteTensorNdhwcToNcdhw(inputTensorInfo, inputData);
+ PermuteTensorNdhwcToNcdhw(outputTensorInfo, outputData);
+ }
+
std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
- std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
- armnn::ScopedTensorHandle biasTensor(biasDesc);
- if (biasEnabled)
- {
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- }
-
armnn::Convolution3dQueueDescriptor data;
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_StrideZ = strideZ;
@@ -261,14 +259,29 @@ LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
data.m_Parameters.m_BiasEnabled = biasEnabled;
armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
+ AddInputToWorkload(data, info, kernelDesc, input1Handle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+ std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
+ if (biasEnabled)
+ {
+ input2Handle = tensorHandleFactory.CreateTensorHandle(biasDesc);
+ AddInputToWorkload(data, info, biasDesc, input2Handle.get());
+ }
+
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution3d(data, info);
- inputHandle->Allocate();
+ input0Handle->Allocate();
+ input1Handle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+ CopyDataToITensorHandle(input0Handle.get(), inputData.data());
+ CopyDataToITensorHandle(input1Handle.get(), kernel.data());
+ if (biasEnabled)
+ {
+ input2Handle->Allocate();
+ CopyDataToITensorHandle(input2Handle.get(), bias.data());
+ }
ExecuteWorkload(*workload, memoryManager);
@@ -840,40 +853,44 @@ LayerTestResult<float, 5> SimpleConvolution3d3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> SimpleConvolution3d3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
@@ -881,158 +898,174 @@ LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> Convolution3d2x2x2Strides3x5x5Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> Convolution3dPaddingSame3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x3x3TestCommonFloat16(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2SmallTestCommonFloat16(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
index a07c183c76..c612e19c9b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
@@ -24,118 +24,138 @@ LayerTestResult<float, 5> SimpleConvolution3d3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> SimpleConvolution3d3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> Convolution3d2x2x2Strides3x5x5Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> Convolution3dPaddingSame3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 0cc8f4aa10..dc4dcecd81 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
+#include <backendsCommon/test/Convolution3dEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
@@ -566,6 +567,36 @@ TEST_CASE("RefConcatEndToEndDim3Uint8Test")
ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
+TEST_CASE("RefConvolution3dFloat32Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dNcdhwFloat32Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NCDHW);
+}
+
+TEST_CASE("RefConvolution3dFloat16Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float16, armnn::DataType::Float16>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dUint8Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dInt8Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
TEST_CASE("RefEluEndToEndTestFloat32")
{
EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index f5d388d007..cb31b37161 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -208,37 +208,119 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue,
false,
DataLayout::NHWC);
-// Convolution 3d
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, SimpleConvolution3d3x3x3Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8, SimpleConvolution3d3x3x3Int8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8, SimpleConvolution3d3x3x3Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16, SimpleConvolution3d3x3x3Int16Test, false)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32, Convolution3d2x2x2Strides3x5x5Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8, Convolution3d2x2x2Strides3x5x5Int8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8, Convolution3d2x2x2Strides3x5x5Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16, Convolution3d2x2x2Strides3x5x5Int16Test, true)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32, Convolution3dPaddingSame3x3x3Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8, Convolution3dPaddingSame3x3x3Int8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8, Convolution3dPaddingSame3x3x3Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16, Convolution3dPaddingSame3x3x3Int16Test, false)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32, Convolution3d2x2x2Dilation2x2x2Float32Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8, Convolution3d2x2x2Dilation2x2x2Int8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8, Convolution3d2x2x2Dilation2x2x2Uint8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16, Convolution3d2x2x2Dilation2x2x2Int16Test, true)
+// Convolution 3d - NDHWC
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32,
+ SimpleConvolution3d3x3x3Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8,
+ SimpleConvolution3d3x3x3Int8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8,
+ SimpleConvolution3d3x3x3Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16,
+ SimpleConvolution3d3x3x3Int16Test,
+ false,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32,
+ Convolution3d2x2x2Strides3x5x5Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8,
+ Convolution3d2x2x2Strides3x5x5Int8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8,
+ Convolution3d2x2x2Strides3x5x5Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16,
+ Convolution3d2x2x2Strides3x5x5Int16Test,
+ true,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32,
+ Convolution3dPaddingSame3x3x3Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8,
+ Convolution3dPaddingSame3x3x3Int8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8,
+ Convolution3dPaddingSame3x3x3Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16,
+ Convolution3dPaddingSame3x3x3Int16Test,
+ false,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32,
+ Convolution3d2x2x2Dilation2x2x2Float32Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8,
+ Convolution3d2x2x2Dilation2x2x2Int8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8,
+ Convolution3d2x2x2Dilation2x2x2Uint8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16,
+ Convolution3d2x2x2Dilation2x2x2Int16Test,
+ true,
+ DataLayout::NDHWC)
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dStrideDilationPadding3x3x3Float32,
Convolution3dStrideDilationPadding3x3x3Float32Test,
- true)
+ true,
+ DataLayout::NDHWC)
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Stride3x3x3SmallTestFloat32,
Convolution3d2x2x2Stride3x3x3SmallFloat32Test,
- false)
+ false,
+ DataLayout::NDHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16, Convolution3d2x3x3Float16Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16, Convolution3d2x2x2SmallFloat16Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16,
+ Convolution3d2x3x3Float16Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16,
+ Convolution3d2x2x2SmallFloat16Test,
+ false,
+ DataLayout::NDHWC)
+
+// Convolution 3d - NCDHW
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3NcdhwFloat32,
+ SimpleConvolution3d3x3x3Float32Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestNcdhwFloat16,
+ Convolution3d2x3x3Float16Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5NcdhwTestInt8,
+ Convolution3d2x2x2Strides3x5x5Int8Test,
+ true,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3NcdhwTestUint8,
+ Convolution3dPaddingSame3x3x3Uint8Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2NcdhwTestInt16,
+ Convolution3d2x2x2Dilation2x2x2Int16Test,
+ true,
+ DataLayout::NCDHW)
// Depthwise Convolution
diff --git a/src/backends/reference/workloads/Conv3dImpl.cpp b/src/backends/reference/workloads/Conv3dImpl.cpp
index 484d887cfc..1c06d624a8 100644
--- a/src/backends/reference/workloads/Conv3dImpl.cpp
+++ b/src/backends/reference/workloads/Conv3dImpl.cpp
@@ -113,11 +113,25 @@ void Convolve3d(const TensorShape& rInputShape,
// Keep this implementation, as using DataLayoutIndexed::GetIndex
// causes large performance regression.
- inputIndex = batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
- (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
- (yInput-paddingTop) * inputWidth * inChannels +
- (xInput-paddingLeft) * inChannels +
- cInput;
+ if (dataLayoutIndexed.GetDataLayout() == DataLayout::NDHWC)
+ {
+ inputIndex =
+ batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
+ (yInput-paddingTop) * inputWidth * inChannels +
+ (xInput-paddingLeft) * inChannels +
+ cInput;
+ }
+ else
+ {
+ // NCDHW DataLayout
+ inputIndex =
+ batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ inputDepth * inputHeight * inputWidth * cInput +
+ (zInput-paddingFront) * inputHeight * inputWidth +
+ (yInput-paddingTop) * inputWidth +
+ xInput-paddingLeft;
+ }
inputValue = inputVec[inputIndex];
}
@@ -133,11 +147,24 @@ void Convolve3d(const TensorShape& rInputShape,
sum += biasVec[cOutput];
}
- unsigned int outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
- zOutput * outputHeight * outputWidth * outChannels +
- yOutput * outputWidth * outChannels +
- xOutput * outChannels +
- cOutput;
+ unsigned int outIdx;
+ if (dataLayoutIndexed.GetDataLayout() == DataLayout::NDHWC)
+ {
+ outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ zOutput * outputHeight * outputWidth * outChannels +
+ yOutput * outputWidth * outChannels +
+ xOutput * outChannels +
+ cOutput;
+ }
+ else
+ {
+ // NCDHW DataLayout
+ outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ cOutput * outputDepth * outputHeight * outputWidth +
+ zOutput * outputHeight * outputWidth +
+ yOutput * outputWidth +
+ xOutput;
+ }
rOutputEncoder[outIdx];
rOutputEncoder.Set(sum);
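To make the two addressing schemes above concrete, here is a small standalone sketch (dimensions and coordinates are arbitrary examples) that computes the flat index of element (n, c, z, y, x) under each layout, using the same formulas as the code above:

    #include <cstdio>

    int main()
    {
        // Example tensor dimensions and element coordinates (illustration only).
        const unsigned int C = 2, D = 3, H = 4, W = 5;
        const unsigned int n = 0, c = 1, z = 1, y = 2, x = 3;

        // NDHWC: channel is the fastest-moving dimension.
        const unsigned int ndhwc = n * D * H * W * C + z * H * W * C + y * W * C + x * C + c;

        // NCDHW: width is the fastest-moving dimension, channel is second-slowest.
        const unsigned int ncdhw = n * C * D * H * W + c * D * H * W + z * H * W + y * W + x;

        std::printf("NDHWC index: %u, NCDHW index: %u\n", ndhwc, ncdhw); // prints 67 and 93
        return 0;
    }
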
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index ea425daec9..afab88f0a8 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -19,10 +19,10 @@ RefConvolution3dWorkload::RefConvolution3dWorkload(
WorkloadInfo detailsInfo;
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
@@ -30,18 +30,25 @@ RefConvolution3dWorkload::RefConvolution3dWorkload(
descriptor.m_Parameters,
detailsInfo,
this->GetGuid());
+}
- m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
- const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
+void RefConvolution3dWorkload::PostAllocationConfigure()
+{
+ PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+void RefConvolution3dWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
+ std::vector<ITensorHandle*> outputs)
+{
+ IgnoreUnused(outputs);
+ const TensorInfo& rFilterInfo = GetTensorInfo(inputs[1]);
m_FilterShape = rFilterInfo.GetShape();
- m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight.get()->Map(true));
+ m_FilterDecoder = MakeDecoder<float>(rFilterInfo);
- if ( descriptor.m_Parameters.m_BiasEnabled )
+ if (m_Data.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Bias ));
- const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
- m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
+ const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
+ m_BiasDecoder = MakeDecoder<float>(biasInfo);
}
}
@@ -52,6 +59,8 @@ void RefConvolution3dWorkload::Execute() const
void RefConvolution3dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
+ PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+
Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}
@@ -65,6 +74,12 @@ void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::
const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
+ m_FilterDecoder->Reset(inputs[1]->Map());
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ m_BiasDecoder->Reset(inputs[2]->Map());
+ }
+
Convolve3d(inputShape, *inputDecoder, outputShape, *outputEncoder, m_FilterShape,
*m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
m_Data.m_Parameters.m_DataLayout,
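To summarise the new lifecycle (a sketch of the assumed call order, with placeholder names for the factory, descriptor and working-memory descriptor): the filter and bias decoders are shaped in PostAllocationConfigure() and only bound to mapped input memory inside Execute(), which lets ExecuteAsync() rebind them to the handles of each WorkingMemDescriptor.

    // Sketch: assumed call order on the reference backend.
    auto workload = workloadFactory.CreateConvolution3d(data, info); // ctor now only records profiling details
    workload->PostAllocationConfigure(); // reads filter/bias TensorInfos from inputs[1] / inputs[2]
    workload->Execute();                 // Reset() decoders onto the mapped handles, then Convolve3d(...)

    // Asynchronous path: decoders are reconfigured against the working memory's own handles.
    workload->ExecuteAsync(workingMemDescriptor);
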
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
index 0373a8b900..4d97512095 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
@@ -19,14 +19,14 @@ public:
explicit RefConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info);
+ void PostAllocationConfigure() override;
void Execute() const override;
void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
private:
+ void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedTensorHandle> m_Weight;
- std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_FilterDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;