Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                      70
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp                       7
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                    6
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp                    3
-rw-r--r--  src/backends/reference/backend.mk                                2
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                   33
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt                  4
-rw-r--r--  src/backends/reference/workloads/Conv3dImpl.cpp                151
-rw-r--r--  src/backends/reference/workloads/Conv3dImpl.hpp                 38
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.cpp   76
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.hpp   38
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp                1
12 files changed, 429 insertions(+), 0 deletions(-)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index aaf9aa0e7c..c0ede678bf 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -605,6 +605,76 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution3dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ bool supported = true;
+
+ // Define supported types.
+ std::array<DataType,7> supportedTypes =
+ {
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QSymmS16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference Convolution3d: input is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference Convolution3d: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "Reference Convolution3d: input and output types mismatched.");
+
+ const DataType inputType = input.GetDataType();
+ if (IsQuantized8BitType(inputType))
+ {
+ std::array<DataType, 3> supportedWeightTypes =
+ {
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
+ "Reference Convolution3d: weights type not supported for quantized input.");
+ }
+ else
+ {
+ supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
+ "Reference Convolution3d: weights is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
+ "Reference Convolution3d: input and weights types mismatched.");
+ }
+
+ if (biases.has_value())
+ {
+ std::array<DataType,4> biasesSupportedTypes =
+ {
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::Signed32
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
+ "Reference Convolution3d: biases is not a supported type.");
+ }
+ IgnoreUnused(descriptor);
+
+ return supported;
+}
+
bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
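For reference, a minimal standalone sketch of how a caller might exercise the new IsConvolution3dSupported check. The NDHWC shapes, the [D,H,W,I,O] weights shape, the descriptor settings and the include path are illustrative assumptions, not values taken from this commit; the support check itself only validates data types.

    // Illustrative sketch, not part of the patch.
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <backends/reference/RefLayerSupport.hpp>   // internal header; path assumed for illustration

    #include <iostream>
    #include <string>

    int main()
    {
        using namespace armnn;

        // Assumed NDHWC tensors: batch 1, 5x5x5 volume, 1 input channel, 1 output channel.
        TensorInfo input  ({ 1, 5, 5, 5, 1 }, DataType::Float32);
        TensorInfo output ({ 1, 3, 3, 3, 1 }, DataType::Float32);
        TensorInfo weights({ 3, 3, 3, 1, 1 }, DataType::Float32);   // [D,H,W,I,O]
        TensorInfo biases ({ 1 },             DataType::Float32);

        Convolution3dDescriptor descriptor;
        descriptor.m_BiasEnabled = true;
        descriptor.m_DataLayout  = DataLayout::NDHWC;
        descriptor.m_StrideX = descriptor.m_StrideY = descriptor.m_StrideZ = 1;

        std::string reason;
        RefLayerSupport layerSupport;
        const bool supported = layerSupport.IsConvolution3dSupported(
            input, output, descriptor, weights,
            Optional<TensorInfo>(biases), Optional<std::string&>(reason));

        std::cout << (supported ? "supported" : "unsupported: " + reason) << std::endl;
        return 0;
    }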
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 2693dc1779..627418e3e1 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -92,6 +92,13 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsConvolution3dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution3dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 681b73a748..18a5af277f 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -241,6 +241,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution2d(const Convolu
return std::make_unique<RefConvolution2dWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<RefConvolution3dWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index fe3eb54795..d00d3ca822 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -115,6 +115,9 @@ public:
std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 2dc2bc4919..7d6c59a273 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -27,6 +27,7 @@ BACKEND_SOURCES := \
workloads/BatchToSpaceNd.cpp \
workloads/Broadcast.cpp \
workloads/ConvImpl.cpp \
+ workloads/Conv3dImpl.cpp \
workloads/Debug.cpp \
workloads/DepthToSpace.cpp \
workloads/DetectionPostProcess.cpp \
@@ -58,6 +59,7 @@ BACKEND_SOURCES := \
workloads/RefConvertFp32ToBf16Workload.cpp \
workloads/RefConvertFp32ToFp16Workload.cpp \
workloads/RefConvolution2dWorkload.cpp \
+ workloads/RefConvolution3dWorkload.cpp \
workloads/RefDebugWorkload.cpp \
workloads/RefDepthToSpaceWorkload.cpp \
workloads/RefDepthwiseConvolution2dWorkload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 4afee79c85..f5d388d007 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -208,6 +208,39 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue,
false,
DataLayout::NHWC);
+// Convolution 3d
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, SimpleConvolution3d3x3x3Float32Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8, SimpleConvolution3d3x3x3Int8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8, SimpleConvolution3d3x3x3Uint8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16, SimpleConvolution3d3x3x3Int16Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32, Convolution3d2x2x2Strides3x5x5Float32Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8, Convolution3d2x2x2Strides3x5x5Int8Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8, Convolution3d2x2x2Strides3x5x5Uint8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16, Convolution3d2x2x2Strides3x5x5Int16Test, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32, Convolution3dPaddingSame3x3x3Float32Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8, Convolution3dPaddingSame3x3x3Int8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8, Convolution3dPaddingSame3x3x3Uint8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16, Convolution3dPaddingSame3x3x3Int16Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32, Convolution3d2x2x2Dilation2x2x2Float32Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8, Convolution3d2x2x2Dilation2x2x2Int8Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8, Convolution3d2x2x2Dilation2x2x2Uint8Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16, Convolution3d2x2x2Dilation2x2x2Int16Test, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dStrideDilationPadding3x3x3Float32,
+ Convolution3dStrideDilationPadding3x3x3Float32Test,
+ true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Stride3x3x3SmallTestFloat32,
+ Convolution3d2x2x2Stride3x3x3SmallFloat32Test,
+ false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16, Convolution3d2x3x3Float16Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16, Convolution3d2x2x2SmallFloat16Test, false)
+
+
// Depthwise Convolution
ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true, DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true, DataLayout::NCHW)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 0ab8c6b0bb..e169c03ad8 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -18,6 +18,8 @@ list(APPEND armnnRefBackendWorkloads_sources
Broadcast.hpp
ConvImpl.cpp
ConvImpl.hpp
+ Conv3dImpl.cpp
+ Conv3dImpl.hpp
Debug.cpp
Debug.hpp
Decoders.hpp
@@ -87,6 +89,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefConvertFp32ToFp16Workload.hpp
RefConvolution2dWorkload.cpp
RefConvolution2dWorkload.hpp
+ RefConvolution3dWorkload.cpp
+ RefConvolution3dWorkload.hpp
RefElementwiseWorkload.cpp
RefElementwiseWorkload.hpp
RefDebugWorkload.cpp
diff --git a/src/backends/reference/workloads/Conv3dImpl.cpp b/src/backends/reference/workloads/Conv3dImpl.cpp
new file mode 100644
index 0000000000..484d887cfc
--- /dev/null
+++ b/src/backends/reference/workloads/Conv3dImpl.cpp
@@ -0,0 +1,151 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Conv3dImpl.hpp"
+
+namespace armnn
+{
+
+void Convolve3d(const TensorShape& rInputShape,
+ Decoder<float>& rInputDecoder,
+ const TensorShape& rOutputShape,
+ Encoder<float>& rOutputEncoder,
+ const TensorShape& rFilterShape,
+ Decoder<float>& rFilterDecoder,
+ bool biasEnabled,
+ Decoder<float>* pBiasDecoder,
+ DataLayout dataLayout,
+ unsigned int paddingTop,
+ unsigned int paddingLeft,
+ unsigned int paddingFront,
+ unsigned int xStride,
+ unsigned int yStride,
+ unsigned int zStride,
+ unsigned int xDilation,
+ unsigned int yDilation,
+ unsigned int zDilation)
+{
+ if (biasEnabled && !pBiasDecoder)
+ {
+ throw InvalidArgumentException("Bias is enabled but the bias data is invalid");
+ }
+ const armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
+
+ const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
+ const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
+ const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
+ const unsigned int depthIndex = dataLayoutIndexed.GetDepthIndex();
+
+ const unsigned int inChannels = rInputShape[channelsIndex];
+ const unsigned int outChannels = rOutputShape[channelsIndex];
+
+ const unsigned int batchSize = rOutputShape[0];
+ const unsigned int outputHeight = rOutputShape[heightIndex];
+ const unsigned int outputWidth = rOutputShape[widthIndex];
+ const unsigned int outputDepth = rOutputShape[depthIndex];
+ const unsigned int inputHeight = rInputShape[heightIndex];
+ const unsigned int inputWidth = rInputShape[widthIndex];
+ const unsigned int inputDepth = rInputShape[depthIndex];
+
+ // Conv3d weights layout: [D,H,W,I,O]
+ const unsigned int filterDepth = rFilterShape[0];
+ const unsigned int filterHeight = rFilterShape[1];
+ const unsigned int filterWidth = rFilterShape[2];
+
+ const std::vector<float> inputVec = rInputDecoder.DecodeTensor(rInputShape);
+ const std::vector<float> filterVec = rFilterDecoder.DecodeTensor(rFilterShape);
+
+ const TensorShape biasShape{outChannels};
+ const std::vector<float> biasVec = biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();
+
+ for (unsigned int batchIdx = 0; batchIdx < batchSize; batchIdx++)
+ {
+ for (unsigned int zOutput = 0; zOutput < outputDepth; zOutput++)
+ {
+ for (unsigned int xOutput = 0; xOutput < outputWidth; xOutput++)
+ {
+ for (unsigned int yOutput = 0; yOutput < outputHeight; yOutput++)
+ {
+ for (unsigned int cOutput = 0; cOutput < outChannels; cOutput++)
+ {
+ // This loop goes over each output element.
+ float sum = 0.0f;
+
+ // Loop over each input channel.
+ for (unsigned int zFilter = 0; zFilter < filterDepth; zFilter++)
+ {
+ for (unsigned int yFilter = 0; yFilter < filterHeight; yFilter++)
+ {
+ for (unsigned int xFilter = 0; xFilter < filterWidth; xFilter++)
+ {
+ for (unsigned int cInput = 0; cInput < inChannels; cInput++)
+ {
+ // This loop goes over each input element for each output element.
+ unsigned int filterIndex = 0;
+
+ // Conv3d weights layout: [D,H,W,I,O]
+ // Keep this implementation, as using DataLayoutIndexed::GetIndex
+ // causes large performance regression.
+ filterIndex = zFilter * filterHeight * filterWidth * inChannels * outChannels +
+ yFilter * filterWidth * inChannels * outChannels +
+ xFilter * inChannels * outChannels +
+ cInput * outChannels +
+ cOutput;
+
+ unsigned int yInput = yOutput * yStride + yFilter * yDilation;
+ unsigned int xInput = xOutput * xStride + xFilter * xDilation;
+ unsigned int zInput = zOutput * zStride + zFilter * zDilation;
+
+ float inputValue;
+
+ // Check if we're in the padding.
+ if (yInput < paddingTop || yInput >= inputHeight + paddingTop ||
+ xInput < paddingLeft || xInput >= inputWidth + paddingLeft ||
+ zInput < paddingFront || zInput >= inputDepth + paddingFront)
+ {
+ inputValue = 0.0f;
+ }
+ else
+ {
+ unsigned int inputIndex = 0;
+
+ // Keep this implementation, as using DataLayoutIndexed::GetIndex
+ // causes large performance regression.
+ inputIndex = batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
+ (yInput-paddingTop) * inputWidth * inChannels +
+ (xInput-paddingLeft) * inChannels +
+ cInput;
+
+ inputValue = inputVec[inputIndex];
+ }
+
+ sum += filterVec[filterIndex] * inputValue;
+ }
+ }
+ }
+ }
+
+ if (biasEnabled)
+ {
+ sum += biasVec[cOutput];
+ }
+
+ unsigned int outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ zOutput * outputHeight * outputWidth * outChannels +
+ yOutput * outputWidth * outChannels +
+ xOutput * outChannels +
+ cOutput;
+
+ rOutputEncoder[outIdx];
+ rOutputEncoder.Set(sum);
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace armnn
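The inner loop above flattens the weight and input coordinates by hand rather than going through DataLayoutIndexed::GetIndex (the in-code comments note the performance regression that motivates this), and it treats any coordinate outside [padding, padding + extent) as an implicit zero. A small standalone sketch of that same index arithmetic and padding test, using hypothetical toy sizes:

    // Illustrative sketch, not part of the patch. All sizes are hypothetical.
    #include <cstdio>

    int main()
    {
        // Toy filter of shape [D,H,W,I,O] = [2,2,2,3,4].
        const unsigned int filterDepth = 2, filterHeight = 2, filterWidth = 2;
        const unsigned int inChannels = 3, outChannels = 4;

        // Flatten (zFilter, yFilter, xFilter, cInput, cOutput) exactly as Convolve3d does.
        const unsigned int zFilter = 1, yFilter = 0, xFilter = 1, cInput = 2, cOutput = 3;
        const unsigned int filterIndex =
            zFilter * filterHeight * filterWidth * inChannels * outChannels +
            yFilter * filterWidth * inChannels * outChannels +
            xFilter * inChannels * outChannels +
            cInput * outChannels +
            cOutput;

        // 48 + 0 + 12 + 8 + 3 = 71
        std::printf("filterIndex = %u\n", filterIndex);

        // The padding test mirrors Convolve3d: an input coordinate reads real data
        // only when it falls inside [padding, padding + inputExtent).
        const unsigned int inputHeight = 4, paddingTop = 1, yStride = 2, yDilation = 1;
        for (unsigned int yOutput = 0; yOutput < 3; ++yOutput)
        {
            for (unsigned int yFilterPos = 0; yFilterPos < filterHeight; ++yFilterPos)
            {
                const unsigned int yInput = yOutput * yStride + yFilterPos * yDilation;
                const bool inPadding = yInput < paddingTop || yInput >= inputHeight + paddingTop;
                std::printf("yOutput=%u yFilter=%u -> yInput=%u %s\n",
                            yOutput, yFilterPos, yInput,
                            inPadding ? "(zero pad)" : "(read input)");
            }
        }
        return 0;
    }

Note also that the write-out above uses the encoder's indexing-then-Set pattern: rOutputEncoder[outIdx] positions the encoder on the flattened NDHWC output element before Set(sum) stores the accumulated value.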
diff --git a/src/backends/reference/workloads/Conv3dImpl.hpp b/src/backends/reference/workloads/Conv3dImpl.hpp
new file mode 100644
index 0000000000..5cf2ed942d
--- /dev/null
+++ b/src/backends/reference/workloads/Conv3dImpl.hpp
@@ -0,0 +1,38 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+#include <armnn/Tensor.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
+namespace armnn
+{
+
+void Convolve3d(const TensorShape& rInputShape,
+ Decoder<float>& rInputDecoder,
+ const TensorShape& rOutputShape,
+ Encoder<float>& rOutputEncoder,
+ const TensorShape& rFilterShape,
+ Decoder<float>& rFilterDecoder,
+ bool biasEnabled,
+ Decoder<float>* pBiasDecoder,
+ DataLayout dataLayout,
+ unsigned int paddingTop,
+ unsigned int paddingLeft,
+ unsigned int paddingFront,
+ unsigned int xStride,
+ unsigned int yStride,
+ unsigned int zStride,
+ unsigned int xDilation,
+ unsigned int yDilation,
+ unsigned int zDilation);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
new file mode 100644
index 0000000000..ea425daec9
--- /dev/null
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -0,0 +1,76 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefConvolution3dWorkload.hpp"
+
+#include "Conv3dImpl.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+namespace armnn
+{
+RefConvolution3dWorkload::RefConvolution3dWorkload(
+ const Convolution3dQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<Convolution3dQueueDescriptor>(descriptor, info)
+{
+ WorkloadInfo detailsInfo;
+ detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
+ detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ if (descriptor.m_Parameters.m_BiasEnabled)
+ {
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ }
+
+ // Report Profiling Details
+ ARMNN_REPORT_PROFILING_WORKLOAD_DESC("RefConvolution3dWorkload_Construct",
+ descriptor.m_Parameters,
+ detailsInfo,
+ this->GetGuid());
+
+ m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
+ const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
+
+ m_FilterShape = rFilterInfo.GetShape();
+ m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight.get()->Map(true));
+
+ if ( descriptor.m_Parameters.m_BiasEnabled )
+ {
+ m_Bias = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Bias ));
+ const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
+ m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
+ }
+}
+
+void RefConvolution3dWorkload::Execute() const
+{
+ Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefConvolution3dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
+{
+ Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+}
+
+void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution3dWorkload_Execute", this->GetGuid());
+
+ std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
+ std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
+
+ const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
+ const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
+
+ Convolve3d(inputShape, *inputDecoder, outputShape, *outputEncoder, m_FilterShape,
+ *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
+ m_Data.m_Parameters.m_DataLayout,
+ m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft, m_Data.m_Parameters.m_PadFront,
+ m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, m_Data.m_Parameters.m_StrideZ,
+ m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY, m_Data.m_Parameters.m_DilationZ);
+}
+
+} //namespace armnn
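The workload forwards the raw strides, dilations and the top/left/front paddings straight into Convolve3d, so the output tensor handed to it is expected to already have the conventional convolution output extents. An illustrative helper for that arithmetic follows; the formula is the standard one for strided, dilated, padded convolution, and whether ArmNN's shape inference uses exactly this form is an assumption on my part:

    // Illustrative sketch, not part of the patch.
    #include <cstdio>

    // Output extent along one axis of a strided, dilated, padded convolution.
    unsigned int ConvOutputSize(unsigned int inputSize,
                                unsigned int kernelSize,
                                unsigned int stride,
                                unsigned int dilation,
                                unsigned int padBefore,
                                unsigned int padAfter)
    {
        const unsigned int dilatedKernel = dilation * (kernelSize - 1) + 1;
        return (inputSize + padBefore + padAfter - dilatedKernel) / stride + 1;
    }

    int main()
    {
        // Hypothetical depth axis: input 5, kernel 3, stride 2, no dilation or padding -> 2.
        std::printf("out depth = %u\n", ConvOutputSize(5, 3, 2, 1, 0, 0));
        // Same kernel with dilation 2: effective extent 2*(3-1)+1 = 5, so input 7 -> 3.
        std::printf("out depth = %u\n", ConvOutputSize(7, 3, 1, 2, 0, 0));
        return 0;
    }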
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
new file mode 100644
index 0000000000..0373a8b900
--- /dev/null
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
@@ -0,0 +1,38 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+class RefConvolution3dWorkload : public BaseWorkload<Convolution3dQueueDescriptor>
+{
+public:
+ explicit RefConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
+
+
+ void Execute() const override;
+ void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+
+private:
+ void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+ std::unique_ptr<ScopedTensorHandle> m_Weight;
+ std::unique_ptr<ScopedTensorHandle> m_Bias;
+
+ std::unique_ptr<Decoder<float>> m_FilterDecoder;
+ std::unique_ptr<Decoder<float>> m_BiasDecoder;
+
+ TensorShape m_FilterShape;
+};
+
+} //namespace armnn
+
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 1cf84eed9e..ed3aa90e5f 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -22,6 +22,7 @@
#include "RefChannelShuffleWorkload.hpp"
#include "RefComparisonWorkload.hpp"
#include "RefConvolution2dWorkload.hpp"
+#include "RefConvolution3dWorkload.hpp"
#include "RefConstantWorkload.hpp"
#include "RefConcatWorkload.hpp"
#include "RefConvertBf16ToFp32Workload.hpp"