aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference
diff options
context:
space:
mode:
authorMatthew Sloyan <matthew.sloyan@arm.com>2021-10-18 13:07:49 +0100
committerMatthew Sloyan <matthew.sloyan@arm.com>2021-10-20 16:03:04 +0100
commit5d7b0a314b3e354a6cbcf15f5dd78b50f1e02774 (patch)
tree3d844c4575193ffddfe3a17c51cb808c9f16ddb0 /src/backends/reference
parent73010788725f8f07efb6df20711ece712ee213ea (diff)
downloadarmnn-5d7b0a314b3e354a6cbcf15f5dd78b50f1e02774.tar.gz
Add ConstTensorsAsInput support for Conv3d
* Constant weights and biases are now stored as Constant layers.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteParser.
* Updated Ref backend to handle constant weights and bias as inputs
  rather than reading from member variables.
* Added Conv3d EndToEnd test.
* Added NCDHW DataLayout and unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I10cdd354ca5f1c748730f92ffdb36bf810f83c8e
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp              |  31
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                  | 130
-rw-r--r--  src/backends/reference/workloads/Conv3dImpl.cpp                |  47
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.cpp  |  33
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.hpp  |   4
5 files changed, 200 insertions, 45 deletions
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 0cc8f4aa10..dc4dcecd81 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
+#include <backendsCommon/test/Convolution3dEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
@@ -566,6 +567,36 @@ TEST_CASE("RefConcatEndToEndDim3Uint8Test")
ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
+TEST_CASE("RefConvolution3dFloat32Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dNcdhwFloat32Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NCDHW);
+}
+
+TEST_CASE("RefConvolution3dFloat16Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float16, armnn::DataType::Float16>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dUint8Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dInt8Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
TEST_CASE("RefEluEndToEndTestFloat32")
{
EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index f5d388d007..cb31b37161 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -208,37 +208,119 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue,
false,
DataLayout::NHWC);
-// Convolution 3d
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, SimpleConvolution3d3x3x3Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8, SimpleConvolution3d3x3x3Int8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8, SimpleConvolution3d3x3x3Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16, SimpleConvolution3d3x3x3Int16Test, false)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32, Convolution3d2x2x2Strides3x5x5Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8, Convolution3d2x2x2Strides3x5x5Int8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8, Convolution3d2x2x2Strides3x5x5Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16, Convolution3d2x2x2Strides3x5x5Int16Test, true)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32, Convolution3dPaddingSame3x3x3Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8, Convolution3dPaddingSame3x3x3Int8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8, Convolution3dPaddingSame3x3x3Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16, Convolution3dPaddingSame3x3x3Int16Test, false)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32, Convolution3d2x2x2Dilation2x2x2Float32Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8, Convolution3d2x2x2Dilation2x2x2Int8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8, Convolution3d2x2x2Dilation2x2x2Uint8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16, Convolution3d2x2x2Dilation2x2x2Int16Test, true)
+// Convolution 3d - NDHWC
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32,
+ SimpleConvolution3d3x3x3Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8,
+ SimpleConvolution3d3x3x3Int8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8,
+ SimpleConvolution3d3x3x3Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16,
+ SimpleConvolution3d3x3x3Int16Test,
+ false,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32,
+ Convolution3d2x2x2Strides3x5x5Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8,
+ Convolution3d2x2x2Strides3x5x5Int8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8,
+ Convolution3d2x2x2Strides3x5x5Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16,
+ Convolution3d2x2x2Strides3x5x5Int16Test,
+ true,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32,
+ Convolution3dPaddingSame3x3x3Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8,
+ Convolution3dPaddingSame3x3x3Int8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8,
+ Convolution3dPaddingSame3x3x3Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16,
+ Convolution3dPaddingSame3x3x3Int16Test,
+ false,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32,
+ Convolution3d2x2x2Dilation2x2x2Float32Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8,
+ Convolution3d2x2x2Dilation2x2x2Int8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8,
+ Convolution3d2x2x2Dilation2x2x2Uint8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16,
+ Convolution3d2x2x2Dilation2x2x2Int16Test,
+ true,
+ DataLayout::NDHWC)
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dStrideDilationPadding3x3x3Float32,
Convolution3dStrideDilationPadding3x3x3Float32Test,
- true)
+ true,
+ DataLayout::NDHWC)
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Stride3x3x3SmallTestFloat32,
Convolution3d2x2x2Stride3x3x3SmallFloat32Test,
- false)
+ false,
+ DataLayout::NDHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16, Convolution3d2x3x3Float16Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16, Convolution3d2x2x2SmallFloat16Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16,
+ Convolution3d2x3x3Float16Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16,
+ Convolution3d2x2x2SmallFloat16Test,
+ false,
+ DataLayout::NDHWC)
+
+// Convolution 3d - NCDHW
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3NcdhwFloat32,
+ SimpleConvolution3d3x3x3Float32Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestNcdhwFloat16,
+ Convolution3d2x3x3Float16Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5NcdhwTestInt8,
+ Convolution3d2x2x2Strides3x5x5Int8Test,
+ true,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3NcdhwTestUint8,
+ Convolution3dPaddingSame3x3x3Uint8Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2NcdhwTestInt16,
+ Convolution3d2x2x2Dilation2x2x2Int16Test,
+ true,
+ DataLayout::NCDHW)
// Depthwise Convolution
diff --git a/src/backends/reference/workloads/Conv3dImpl.cpp b/src/backends/reference/workloads/Conv3dImpl.cpp
index 484d887cfc..1c06d624a8 100644
--- a/src/backends/reference/workloads/Conv3dImpl.cpp
+++ b/src/backends/reference/workloads/Conv3dImpl.cpp
@@ -113,11 +113,25 @@ void Convolve3d(const TensorShape& rInputShape,
// Keep this implementation, as using DataLayoutIndexed::GetIndex
// causes large performance regression.
- inputIndex = batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
- (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
- (yInput-paddingTop) * inputWidth * inChannels +
- (xInput-paddingLeft) * inChannels +
- cInput;
+ if (dataLayoutIndexed.GetDataLayout() == DataLayout::NDHWC)
+ {
+ inputIndex =
+ batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
+ (yInput-paddingTop) * inputWidth * inChannels +
+ (xInput-paddingLeft) * inChannels +
+ cInput;
+ }
+ else
+ {
+ // NCDHW DataLayout
+ inputIndex =
+ batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ inputDepth * inputHeight * inputWidth * cInput +
+ (zInput-paddingFront) * inputHeight * inputWidth +
+ (yInput-paddingTop) * inputWidth +
+ xInput-paddingLeft;
+ }
inputValue = inputVec[inputIndex];
}
@@ -133,11 +147,24 @@ void Convolve3d(const TensorShape& rInputShape,
sum += biasVec[cOutput];
}
- unsigned int outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
- zOutput * outputHeight * outputWidth * outChannels +
- yOutput * outputWidth * outChannels +
- xOutput * outChannels +
- cOutput;
+ unsigned int outIdx;
+ if (dataLayoutIndexed.GetDataLayout() == DataLayout::NDHWC)
+ {
+ outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ zOutput * outputHeight * outputWidth * outChannels +
+ yOutput * outputWidth * outChannels +
+ xOutput * outChannels +
+ cOutput;
+ }
+ else
+ {
+ // NCDHW DataLayout
+ outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ cOutput * outputDepth * outputHeight * outputWidth +
+ zOutput * outputHeight * outputWidth +
+ yOutput * outputWidth +
+ xOutput;
+ }
rOutputEncoder[outIdx];
rOutputEncoder.Set(sum);
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index ea425daec9..afab88f0a8 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -19,10 +19,10 @@ RefConvolution3dWorkload::RefConvolution3dWorkload(
WorkloadInfo detailsInfo;
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
@@ -30,18 +30,25 @@ RefConvolution3dWorkload::RefConvolution3dWorkload(
descriptor.m_Parameters,
detailsInfo,
this->GetGuid());
+}
- m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
- const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
+void RefConvolution3dWorkload::PostAllocationConfigure()
+{
+ PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+void RefConvolution3dWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
+ std::vector<ITensorHandle*> outputs)
+{
+ IgnoreUnused(outputs);
+ const TensorInfo& rFilterInfo = GetTensorInfo(inputs[1]);
m_FilterShape = rFilterInfo.GetShape();
- m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight.get()->Map(true));
+ m_FilterDecoder = MakeDecoder<float>(rFilterInfo);
- if ( descriptor.m_Parameters.m_BiasEnabled )
+ if (m_Data.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Bias ));
- const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
- m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
+ const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
+ m_BiasDecoder = MakeDecoder<float>(biasInfo);
}
}
@@ -52,6 +59,8 @@ void RefConvolution3dWorkload::Execute() const
void RefConvolution3dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
+ PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+
Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}
@@ -65,6 +74,12 @@ void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::
const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
+ m_FilterDecoder->Reset(inputs[1]->Map());
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ m_BiasDecoder->Reset(inputs[2]->Map());
+ }
+
Convolve3d(inputShape, *inputDecoder, outputShape, *outputEncoder, m_FilterShape,
*m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
m_Data.m_Parameters.m_DataLayout,
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
index 0373a8b900..4d97512095 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
@@ -19,14 +19,14 @@ public:
explicit RefConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info);
+ void PostAllocationConfigure() override;
void Execute() const override;
void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
private:
+ void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedTensorHandle> m_Weight;
- std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_FilterDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;