aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
authorMatthew Sloyan <matthew.sloyan@arm.com>2021-09-08 13:05:51 +0100
committerMatthew Sloyan <matthew.sloyan@arm.com>2021-10-01 15:27:01 +0100
commitb63a31170aee1d28267d83a4bc67b57708fb6b05 (patch)
tree16cea0a872939be749b72f45ad125964439bc40e /src/armnn
parenteb852bb9e45b1db42a26001ece11ec7cc1f2bbfe (diff)
downloadarmnn-b63a31170aee1d28267d83a4bc67b57708fb6b05.tar.gz
IVGCVSW-6163 Add Conv3d FrontEnd and Ref Implementation
* Added front-end * Added Reference workload * Added Serializer & Deserializer support * Added unit tests * Added NDHWC DataLayout Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com> Change-Id: Iec4d39e7433b5334d52fa44cf8efc6bcd39319d8
Diffstat (limited to 'src/armnn')
-rw-r--r--src/armnn/BackendHelper.cpp15
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp49
-rw-r--r--src/armnn/Network.hpp5
-rw-r--r--src/armnn/SerializeLayerParameters.cpp27
-rw-r--r--src/armnn/SerializeLayerParameters.hpp5
-rw-r--r--src/armnn/layers/Convolution3dLayer.cpp172
-rw-r--r--src/armnn/layers/Convolution3dLayer.hpp68
-rw-r--r--src/armnn/test/InferOutputTests.cpp3
-rw-r--r--src/armnn/test/InferOutputTests.hpp37
10 files changed, 374 insertions, 9 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index c17d076955..1616fd1aad 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -282,6 +282,21 @@ bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
reasonIfUnsupported.value());
}
+bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution3dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConvolution3dSupported(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported.value());
+}
+
bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 6f39ca0508..49c39b3985 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -21,6 +21,7 @@
#include "layers/ConvertFp32ToBf16Layer.hpp"
#include "layers/ConvertFp32ToFp16Layer.hpp"
#include "layers/Convolution2dLayer.hpp"
+#include "layers/Convolution3dLayer.hpp"
#include "layers/DebugLayer.hpp"
#include "layers/DepthToSpaceLayer.hpp"
#include "layers/DepthwiseConvolution2dLayer.hpp"
@@ -119,6 +120,7 @@ DECLARE_LAYER(ConvertFp16ToFp32)
DECLARE_LAYER(ConvertFp32ToBf16)
DECLARE_LAYER(ConvertFp32ToFp16)
DECLARE_LAYER(Convolution2d)
+DECLARE_LAYER(Convolution3d)
DECLARE_LAYER(Debug)
DECLARE_LAYER(DepthToSpace)
DECLARE_LAYER(DepthwiseConvolution2d)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 84097176e7..4070802be8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -113,6 +113,15 @@ IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor
}
+IConnectableLayer* INetwork::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ return pNetworkImpl->AddConvolution3dLayer(convolution3dDescriptor, weights, biases, name);
+}
+
+
IConnectableLayer* INetwork::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
const char* name)
{
@@ -1991,22 +2000,21 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescrip
return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
}
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name)
+IConnectableLayer* NetworkImpl::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
{
- if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
+ if (convolution3dDescriptor.m_BiasEnabled && !biases.has_value())
{
- throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
+ throw InvalidArgumentException("AddConvolution3dLayer: biases cannot be empty");
}
- const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
+ const auto layer = m_Graph->AddLayer<Convolution3dLayer>(convolution3dDescriptor, name);
layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
- if (convolution2dDescriptor.m_BiasEnabled)
+ if (convolution3dDescriptor.m_BiasEnabled)
{
layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
@@ -2020,6 +2028,29 @@ IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescripto
return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
}
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
+ const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name)
+{
+ if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
+ {
+ throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be empty");
+ }
+
+ const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
+
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
+
+ if (convolution2dDescriptor.m_BiasEnabled)
+ {
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
+ }
+
+ return layer;
+}
+
IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 67c5b5af52..11759c71de 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -89,6 +89,11 @@ public:
const ConstTensor& biases,
const char* name = nullptr);
+ IConnectableLayer* AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char* name = nullptr);
+
IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr);
IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index 73e0cbce78..da2c39d4b6 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -101,6 +101,33 @@ void StringifyLayerParameters<Convolution2dDescriptor>::Serialize(ParameterStrin
fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
}
+void StringifyLayerParameters<Convolution3dDescriptor>::Serialize(ParameterStringifyFunction& fn,
+ const Convolution3dDescriptor& desc)
+{
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_PadTop << "," << desc.m_PadLeft
+ << "," << desc.m_PadBottom << "," << desc.m_PadRight
+ << "," << desc.m_PadFront << "," << desc.m_PadBack << ")";
+ fn("Padding(T,L,B,R,F,Bk)", ss.str());
+ }
+
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_StrideX << "," << desc.m_StrideY << "," << desc.m_StrideZ << ")";
+ fn("Stride(X,Y,Z)", ss.str());
+ }
+
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_DilationX << "," << desc.m_DilationY << "," << desc.m_DilationZ << ")";
+ fn("Dilation(X,Y,Z)", ss.str());
+ }
+
+ fn("BiasEnabled",(desc.m_BiasEnabled ? "true" : "false"));
+ fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
+}
+
void StringifyLayerParameters<DetectionPostProcessDescriptor>::Serialize(ParameterStringifyFunction& fn,
const DetectionPostProcessDescriptor& desc)
{
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index f8fe5e2992..8a3630ce9d 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -55,6 +55,11 @@ template <> struct StringifyLayerParameters<Convolution2dDescriptor>
static void Serialize(ParameterStringifyFunction& fn, const Convolution2dDescriptor& desc);
};
+template <> struct StringifyLayerParameters<Convolution3dDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction& fn, const Convolution3dDescriptor& desc);
+};
+
template <> struct StringifyLayerParameters<DetectionPostProcessDescriptor>
{
static void Serialize(ParameterStringifyFunction& fn, const DetectionPostProcessDescriptor& desc);
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
new file mode 100644
index 0000000000..0e38c0b129
--- /dev/null
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -0,0 +1,172 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Convolution3dLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
+#include <backendsCommon/TensorHandle.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+Convolution3dLayer::Convolution3dLayer(const Convolution3dDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Convolution3d, param, name)
+{
+}
+
+void Convolution3dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
+{
+ const std::vector<TensorShape>& inputShapes =
+ {
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ m_Weight->GetTensorInfo().GetShape()
+ };
+
+ // Conv3d Filter Layout: [D,H,W,I,O]
+ const TensorShape filterShape = inputShapes[1];
+ DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
+ unsigned int filterDepth = filterShape[0];
+ unsigned int filterHeight = filterShape[1];
+ unsigned int filterWidth = filterShape[2];
+ unsigned int inChannels = filterShape[3];
+ unsigned int outChannels = filterShape[4];
+
+ fn("FilterDepth",std::to_string(filterDepth));
+ fn("FilterHeight",std::to_string(filterHeight));
+ fn("FilterWidth",std::to_string(filterWidth));
+ fn("InputChannels",std::to_string(inChannels));
+ fn("OutputChannels",std::to_string(outChannels));
+
+ LayerWithParameters<Convolution3dDescriptor>::SerializeLayerParameters(fn);
+}
+
+std::unique_ptr<IWorkload> Convolution3dLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ // At this level constant data should not be released.
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution3dLayer: Weights data should not be null.");
+
+ Convolution3dQueueDescriptor descriptor;
+ descriptor.m_Weight = m_Weight.get();
+
+ if (m_Param.m_BiasEnabled)
+ {
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution3dLayer: Bias data should not be null.");
+ descriptor.m_Bias = m_Bias.get();
+ }
+
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateConvolution3d(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+Convolution3dLayer* Convolution3dLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<Convolution3dLayer>(graph, m_Param, GetName());
+
+ layer->m_Weight = m_Weight ? m_Weight : nullptr;
+
+ if (layer->m_Param.m_BiasEnabled)
+ {
+ layer->m_Bias = m_Bias ? m_Bias : nullptr;
+ }
+
+ return std::move(layer);
+}
+
+std::vector<TensorShape> Convolution3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 2);
+ const TensorShape& inputShape = inputShapes[0];
+ const TensorShape& filterShape = inputShapes[1];
+
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Convolutions will always have 5D input.");
+
+ ARMNN_ASSERT( m_Param.m_StrideX > 0);
+ ARMNN_ASSERT( m_Param.m_StrideY > 0);
+ ARMNN_ASSERT( m_Param.m_StrideZ > 0);
+
+ DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
+
+ unsigned int inWidth = inputShape[dataLayoutIndex.GetWidthIndex()];
+ unsigned int inHeight = inputShape[dataLayoutIndex.GetHeightIndex()];
+ unsigned int inDepth = inputShape[dataLayoutIndex.GetDepthIndex()];
+ unsigned int inBatchSize = inputShape[0];
+
+ // Conv3d Filter Layout: [D,H,W,I,O]
+ unsigned int filterDepth = filterShape[0];
+ unsigned int dilatedFilterDepth = filterDepth + (m_Param.m_DilationZ - 1) * (filterDepth - 1);
+ unsigned int readDepth = (inDepth + m_Param.m_PadFront + m_Param.m_PadBack) - dilatedFilterDepth;
+ unsigned int outDepth = 1 + (readDepth / m_Param.m_StrideZ);
+
+ unsigned int filterHeight = filterShape[1];
+ unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
+ unsigned int readHeight = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
+ unsigned int outHeight = 1 + (readHeight / m_Param.m_StrideY);
+
+ unsigned int filterWidth = filterShape[2];
+ unsigned int dilatedFilterWidth = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
+ unsigned int readWidth = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
+ unsigned int outWidth = 1 + (readWidth / m_Param.m_StrideX);
+
+ unsigned int outChannels = filterShape[4];
+ unsigned int outBatchSize = inBatchSize;
+
+ TensorShape tensorShape = TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } );
+
+ return std::vector<TensorShape>({ tensorShape });
+}
+
+void Convolution3dLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ // Check that the m_Weight data is not nullptr.
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution3dLayer: Weights data should not be null.");
+
+ auto inferredShapes = InferOutputShapes({
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ m_Weight->GetTensorInfo().GetShape() });
+
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution3dLayer");
+}
+
+Layer::ConstantTensors Convolution3dLayer::GetConstantTensorsByRef()
+{
+ return {m_Weight, m_Bias};
+}
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+void Convolution3dLayer::Accept(ILayerVisitor& visitor) const
+{
+ IgnoreUnused(visitor);
+ throw armnn::Exception("Convolution3dLayer: VisitConvolution3dLayer is not implemented");
+}
+ARMNN_NO_DEPRECATE_WARN_END
+
+void Convolution3dLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ ManagedConstTensorHandle managedWeight(m_Weight);
+ std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };
+
+ ManagedConstTensorHandle managedBias(m_Bias);
+ if (GetParameters().m_BiasEnabled)
+ {
+ constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map()));
+ }
+
+ strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/Convolution3dLayer.hpp b/src/armnn/layers/Convolution3dLayer.hpp
new file mode 100644
index 0000000000..bef5715098
--- /dev/null
+++ b/src/armnn/layers/Convolution3dLayer.hpp
@@ -0,0 +1,68 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class ScopedTensorHandle;
+
+/// This layer represents a convolution 3d operation.
+class Convolution3dLayer : public LayerWithParameters<Convolution3dDescriptor>
+{
+public:
+
+ /// A unique pointer to store Weight values.
+ std::shared_ptr<ConstTensorHandle> m_Weight;
+ /// A unique pointer to store Bias values.
+ std::shared_ptr<ConstTensorHandle> m_Bias;
+
+ /// Makes a workload for the Convolution3d type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ Convolution3dLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref Convolution3dLayer.
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
+ void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
+
+protected:
+ /// Constructor to create a Convolution3dLayer.
+ /// @param [in] param Convolution3dDescriptor to configure the convolution3d operation.
+ /// @param [in] name Optional name for the layer.
+ Convolution3dLayer(const Convolution3dDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~Convolution3dLayer() = default;
+
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
+ ConstantTensors GetConstantTensorsByRef() override;
+};
+
+} // namespace
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 81ad7b2d38..5365b831cf 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -38,6 +38,9 @@ ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsNoMatch, StackValidate
// Convolution2D
ARMNN_SIMPLE_TEST_CASE(Convolution2dInferOutputShape, Convolution2dInferOutputShapeTest)
+// Convolution3D
+ARMNN_SIMPLE_TEST_CASE(Convolution3dInferOutputShape, Convolution3dInferOutputShapeTest)
+
// DepthwiseConvolution2D
ARMNN_SIMPLE_TEST_CASE(DepthwiseConvolution2dInferOutputShape, DepthwiseConvolution2dInferOutputShapeTest)
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 6e2676ec8e..e2c854551f 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -464,6 +464,43 @@ void Convolution2dInferOutputShapeTest()
CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
}
+void Convolution3dInferOutputShapeTest()
+{
+ armnn::Graph graph;
+
+ armnn::Convolution3dDescriptor descriptor;
+ descriptor.m_DilationX = 1;
+ descriptor.m_DilationY = 1;
+ descriptor.m_DilationZ = 1;
+ descriptor.m_PadTop = 1;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadLeft = 1;
+ descriptor.m_PadRight = 1;
+ descriptor.m_PadFront = 1;
+ descriptor.m_PadBack = 1;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_StrideZ = 2;
+ descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
+
+ armnn::Convolution3dLayer* const convolution3dLayer =
+ graph.AddLayer<armnn::Convolution3dLayer>(descriptor, "convolution3d");
+
+ std::vector<armnn::TensorShape> shapes;
+ const std::vector<unsigned int> inputSize = {1, 5, 5, 5, 1};
+ armnn::TensorShape inputShape(5, inputSize.data());
+ shapes.push_back(inputShape);
+
+ const std::vector<unsigned int> filterSize = {3, 3, 3, 1, 1 };
+ armnn::TensorShape filterShape(5, filterSize.data());
+ shapes.push_back(filterShape);
+
+ const std::vector<unsigned int> expectedOutputSizes = {1, 3, 3, 3, 1};
+ armnn::TensorShape expectedOutputShape(5, expectedOutputSizes.data());
+
+ CHECK(expectedOutputShape == convolution3dLayer->InferOutputShapes(shapes).at(0));
+}
+
void TransposeConvolution2dInferOutputShapeTest()
{
armnn::Graph graph;