author     Matthew Sloyan <matthew.sloyan@arm.com>    2021-10-18 13:07:49 +0100
committer  Matthew Sloyan <matthew.sloyan@arm.com>    2021-10-20 16:03:04 +0100
commit     5d7b0a314b3e354a6cbcf15f5dd78b50f1e02774
tree       3d844c4575193ffddfe3a17c51cb808c9f16ddb0
parent     73010788725f8f07efb6df20711ece712ee213ea
download   armnn-5d7b0a314b3e354a6cbcf15f5dd78b50f1e02774.tar.gz
Add ConstTensorsAsInput support for Conv3d
* Constant weights and biases are now stored as Constant layers.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteParser.
* Updated Ref backend to handle constant weights and bias as inputs rather than reading from member variables.
* Added Conv3d EndToEnd test.
* Added NCDHW DataLayout and unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I10cdd354ca5f1c748730f92ffdb36bf810f83c8e
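With this change the weights and optional bias are no longer passed to AddConvolution3dLayer; they are added as Constant layers and connected to input slots 1 and 2 of the convolution, as the updated SerializerTests and the new Convolution3dEndToEnd test do. A minimal sketch of the new wiring follows; the ConstTensor variables, shapes and layer names are illustrative only, not taken from this patch:

    // Sketch only: weightsTensor and biasesTensor are assumed to be pre-built
    // armnn::ConstTensor objects with Conv3d-compatible shapes.
    armnn::Convolution3dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_StrideZ     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = armnn::DataLayout::NDHWC;   // NCDHW is now also accepted

    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* const input   = network->AddInputLayer(0, "input");
    armnn::IConnectableLayer* const weights = network->AddConstantLayer(weightsTensor, "Weights");
    armnn::IConnectableLayer* const biases  = network->AddConstantLayer(biasesTensor, "Biases");
    // Weights and biases are no longer arguments of AddConvolution3dLayer.
    armnn::IConnectableLayer* const conv3d  = network->AddConvolution3dLayer(descriptor, "convolution3d");
    armnn::IConnectableLayer* const output  = network->AddOutputLayer(0, "output");

    // Data goes to slot 0, weights to slot 1 and, when m_BiasEnabled is set, bias to slot 2.
    input->GetOutputSlot(0).Connect(conv3d->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(conv3d->GetInputSlot(1));
    biases->GetOutputSlot(0).Connect(conv3d->GetInputSlot(2));
    conv3d->GetOutputSlot(0).Connect(output->GetInputSlot(0));

Each output slot still needs its TensorInfo set, exactly as the updated tests do.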
-rw-r--r--  include/armnn/Descriptors.hpp | 5
-rw-r--r--  include/armnn/INetwork.hpp | 4
-rw-r--r--  include/armnn/Types.hpp | 3
-rw-r--r--  include/armnn/TypesUtils.hpp | 1
-rw-r--r--  src/armnn/Descriptors.cpp | 11
-rw-r--r--  src/armnn/Graph.cpp | 11
-rw-r--r--  src/armnn/Network.cpp | 22
-rw-r--r--  src/armnn/Network.hpp | 2
-rw-r--r--  src/armnn/layers/Convolution3dLayer.cpp | 52
-rw-r--r--  src/armnn/layers/Convolution3dLayer.hpp | 10
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp | 21
-rw-r--r--  src/armnnDeserializer/test/DeserializeConvolution3d.cpp | 92
-rw-r--r--  src/armnnSerializer/ArmnnSchema.fbs | 5
-rw-r--r--  src/armnnSerializer/ArmnnSchema_generated.h | 39
-rw-r--r--  src/armnnSerializer/Serializer.cpp | 22
-rw-r--r--  src/armnnSerializer/Serializer.hpp | 1
-rw-r--r--  src/armnnSerializer/SerializerUtils.cpp | 2
-rw-r--r--  src/armnnSerializer/test/SerializerTestUtils.hpp | 1
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp | 17
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp | 27
-rw-r--r--  src/armnnUtils/DataLayoutIndexed.cpp | 6
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp | 15
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp | 11
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp | 11
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt | 1
-rw-r--r--  src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp | 167
-rw-r--r--  src/backends/backendsCommon/test/DataLayoutUtils.hpp | 24
-rw-r--r--  src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp | 6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp | 143
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp | 60
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp | 31
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp | 130
-rw-r--r--  src/backends/reference/workloads/Conv3dImpl.cpp | 47
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.cpp | 33
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.hpp | 4
35 files changed, 687 insertions, 350 deletions
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index b412bbdcc9..39ea824045 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -498,6 +498,9 @@ struct Convolution3dDescriptor : BaseDescriptor
m_DataLayout == rhs.m_DataLayout;
}
+ /// Get the number of views/inputs.
+ uint32_t GetNumInputs() const;
+
/// Padding left value in the width dimension.
uint32_t m_PadLeft;
/// Padding right value in the width dimension.
@@ -524,7 +527,7 @@ struct Convolution3dDescriptor : BaseDescriptor
uint32_t m_DilationZ;
/// Enable/disable bias.
bool m_BiasEnabled;
- /// The data layout to be used (NDHWC).
+ /// The data layout to be used (NDHWC, NCDHW).
DataLayout m_DataLayout;
};
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index ab92f05112..707ae00bb3 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -258,13 +258,9 @@ public:
/// Adds a 3D convolution layer to the network.
/// @param convolution3dDescriptor - Description of the 3D convolution layer.
- /// @param weights - Tensor for the weights data.
- /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
/// @param name - Optional name for the layer.
/// @return - Interface for configuring the layer.
IConnectableLayer* AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name = nullptr);
/// Adds a depth to space layer to the network.
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 7f2e192102..4f39ebe16a 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -50,7 +50,8 @@ enum class DataLayout
{
NCHW = 1,
NHWC = 2,
- NDHWC = 3
+ NDHWC = 3,
+ NCDHW = 4
};
/// Define the behaviour of the internal profiler when outputting network details
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index d08f592d86..a1c11b74df 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -215,6 +215,7 @@ constexpr const char* GetDataLayoutName(DataLayout dataLayout)
case DataLayout::NCHW: return "NCHW";
case DataLayout::NHWC: return "NHWC";
case DataLayout::NDHWC: return "NDHWC";
+ case DataLayout::NCDHW: return "NCDHW";
default: return "Unknown";
}
}
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index ab68097247..ef55ee7bb5 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -441,4 +441,15 @@ uint32_t FullyConnectedDescriptor::GetNumInputs() const
return numInputs;
}
+uint32_t Convolution3dDescriptor::GetNumInputs() const
+{
+ // Return 2 otherwise check if bias is enabled
+ unsigned int numInputs = 2;
+ if (m_BiasEnabled)
+ {
+ numInputs = 3;
+ }
+ return numInputs;
+}
+
}
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 30639b12e8..0591bea99a 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -588,7 +588,7 @@ void Graph::InferTensorInfos()
}
/// Throws exception due to a layer input not being connected to an output slot.
-/// Verifies weights and bias are set for FullyConnected layers on input slots 1
+/// Verifies weights and bias are set for layers on input slots 1
/// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
///
/// @param layer constant pointer to a Layer object
@@ -600,7 +600,8 @@ void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
std::ostringstream message;
bool noWeightsAndBias = false;
- if (layer->GetType() == armnn::LayerType::FullyConnected && slotIndex > 0)
+ if ((layer->GetType() == armnn::LayerType::FullyConnected ||
+ layer->GetType() == armnn::LayerType::Convolution3d) && slotIndex > 0)
{
// If weights are not set and is bias enabled, also check if bias is set
if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
@@ -608,7 +609,7 @@ void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
if (biasSource == NULL)
{
- message << "FullyConnected layer weights and bias not set: ";
+ message << layer->GetName() << " layer weights and bias not set: ";
noWeightsAndBias = true;
}
}
@@ -618,11 +619,11 @@ void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
{
if (slotIndex == 1)
{
- message << "FullyConnected layer weights not set: ";
+ message << layer->GetName() << " layer weights not set: ";
}
else
{
- message << "FullyConnected layer bias not set: ";
+ message << layer->GetName() << " layer bias not set: ";
}
}
}
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 99d7b96ec2..b516d519d5 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -114,11 +114,9 @@ IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor
IConnectableLayer* INetwork::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name)
{
- return pNetworkImpl->AddConvolution3dLayer(convolution3dDescriptor, weights, biases, name);
+ return pNetworkImpl->AddConvolution3dLayer(convolution3dDescriptor, name);
}
@@ -1934,25 +1932,9 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescrip
}
IConnectableLayer* NetworkImpl::AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name)
{
- if (convolution3dDescriptor.m_BiasEnabled && !biases.has_value())
- {
- throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be empty");
- }
-
- const auto layer = m_Graph->AddLayer<Convolution3dLayer>(convolution3dDescriptor, name);
-
- layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
-
- if (convolution3dDescriptor.m_BiasEnabled)
- {
- layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
- }
-
- return layer;
+ return m_Graph->AddLayer<Convolution3dLayer>(convolution3dDescriptor, name);
}
IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index eb1d39d2f6..818a765296 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -87,8 +87,6 @@ public:
const char* name = nullptr);
IConnectableLayer* AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name = nullptr);
IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr);
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index 0e38c0b129..1c2d1b9872 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -16,7 +16,7 @@ namespace armnn
{
Convolution3dLayer::Convolution3dLayer(const Convolution3dDescriptor& param, const char* name)
- : LayerWithParameters(1, 1, LayerType::Convolution3d, param, name)
+ : LayerWithParameters(param.GetNumInputs(), 1, LayerType::Convolution3d, param, name)
{
}
@@ -25,12 +25,11 @@ void Convolution3dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn
const std::vector<TensorShape>& inputShapes =
{
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
- m_Weight->GetTensorInfo().GetShape()
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
};
// Conv3d Filter Layout: [D,H,W,I,O]
const TensorShape filterShape = inputShapes[1];
- DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
unsigned int filterDepth = filterShape[0];
unsigned int filterHeight = filterShape[1];
unsigned int filterWidth = filterShape[2];
@@ -48,18 +47,7 @@ void Convolution3dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn
std::unique_ptr<IWorkload> Convolution3dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- // At this level constant data should not be released.
- ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution3dLayer: Weights data should not be null.");
-
Convolution3dQueueDescriptor descriptor;
- descriptor.m_Weight = m_Weight.get();
-
- if (m_Param.m_BiasEnabled)
- {
- ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution3dLayer: Bias data should not be null.");
- descriptor.m_Bias = m_Bias.get();
- }
-
SetAdditionalInfo(descriptor);
return factory.CreateConvolution3d(descriptor, PrepInfoAndDesc(descriptor));
@@ -68,14 +56,6 @@ std::unique_ptr<IWorkload> Convolution3dLayer::CreateWorkload(const IWorkloadFac
Convolution3dLayer* Convolution3dLayer::Clone(Graph& graph) const
{
auto layer = CloneBase<Convolution3dLayer>(graph, m_Param, GetName());
-
- layer->m_Weight = m_Weight ? m_Weight : nullptr;
-
- if (layer->m_Param.m_BiasEnabled)
- {
- layer->m_Bias = m_Bias ? m_Bias : nullptr;
- }
-
return std::move(layer);
}
@@ -117,36 +97,33 @@ std::vector<TensorShape> Convolution3dLayer::InferOutputShapes(const std::vector
unsigned int outChannels = filterShape[4];
unsigned int outBatchSize = inBatchSize;
- TensorShape tensorShape = TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } );
+ TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NDHWC ?
+ TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } ) :
+ TensorShape( { outBatchSize, outChannels, outDepth, outHeight, outWidth });
return std::vector<TensorShape>({ tensorShape });
}
void Convolution3dLayer::ValidateTensorShapesFromInputs()
{
- VerifyLayerConnections(1, CHECK_LOCATION());
+ VerifyLayerConnections(m_Param.GetNumInputs(), CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- // check if we m_Weight data is not nullptr
- ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution3dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
+ "Convolution3dLayer: Weights should be connected to input slot 1.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
- m_Weight->GetTensorInfo().GetShape() });
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution3dLayer");
}
-Layer::ConstantTensors Convolution3dLayer::GetConstantTensorsByRef()
-{
- return {m_Weight, m_Bias};
-}
-
ARMNN_NO_DEPRECATE_WARN_BEGIN
void Convolution3dLayer::Accept(ILayerVisitor& visitor) const
{
@@ -157,16 +134,7 @@ ARMNN_NO_DEPRECATE_WARN_END
void Convolution3dLayer::ExecuteStrategy(IStrategy& strategy) const
{
- ManagedConstTensorHandle managedWeight(m_Weight);
- std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };
-
- ManagedConstTensorHandle managedBias(m_Bias);
- if (GetParameters().m_BiasEnabled)
- {
- constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map()));
- }
-
- strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
} // namespace armnn
diff --git a/src/armnn/layers/Convolution3dLayer.hpp b/src/armnn/layers/Convolution3dLayer.hpp
index bef5715098..7cbd6428dc 100644
--- a/src/armnn/layers/Convolution3dLayer.hpp
+++ b/src/armnn/layers/Convolution3dLayer.hpp
@@ -16,12 +16,6 @@ class ScopedTensorHandle;
class Convolution3dLayer : public LayerWithParameters<Convolution3dDescriptor>
{
public:
-
- /// A unique pointer to store Weight values.
- std::shared_ptr<ConstTensorHandle> m_Weight;
- /// A unique pointer to store Bias values.
- std::shared_ptr<ConstTensorHandle> m_Bias;
-
/// Makes a workload for the Convolution3d type.
/// @param [in] graph The graph where this layer can be found.
/// @param [in] factory The workload factory which will create the workload.
@@ -59,10 +53,6 @@ protected:
/// Default destructor
~Convolution3dLayer() = default;
-
- /// Retrieve the handles to the constant values stored by the layer.
- /// @return A vector of the constant tensors stored by this layer.
- ConstantTensors GetConstantTensorsByRef() override;
};
} // namespace
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 6b73946af2..c088ef7b54 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -449,6 +449,8 @@ armnn::DataLayout ToDataLayout(armnnSerializer::DataLayout dataLayout)
return armnn::DataLayout::NHWC;
case armnnSerializer::DataLayout::DataLayout_NDHWC:
return armnn::DataLayout::NDHWC;
+ case armnnSerializer::DataLayout::DataLayout_NCDHW:
+ return armnn::DataLayout::NCDHW;
case armnnSerializer::DataLayout::DataLayout_NCHW:
default:
return armnn::DataLayout::NCHW;
@@ -1402,7 +1404,6 @@ void IDeserializer::DeserializerImpl::ParseConvolution3d(GraphPtr graph, unsigne
CHECK_LAYERS(graph, 0, layerIndex);
auto inputs = GetInputs(graph, layerIndex);
CHECK_LOCATION();
- CHECK_VALID_SIZE(inputs.size(), 1);
auto outputs = GetOutputs(graph, layerIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
@@ -1424,22 +1425,14 @@ void IDeserializer::DeserializerImpl::ParseConvolution3d(GraphPtr graph, unsigne
descriptor.m_DilationX = serializerDescriptor->dilationX();
descriptor.m_DilationY = serializerDescriptor->dilationY();
descriptor.m_DilationZ = serializerDescriptor->dilationZ();
- descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();;
+ descriptor.m_BiasEnabled = serializerDescriptor->biasEnabled();
descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
- armnn::ConstTensor weights = ToConstTensor(serializerLayer->weights());
- armnn::ConstTensor biases;
+ uint32_t numInputs = descriptor.GetNumInputs();
+ CHECK_VALID_SIZE(inputs.size(), numInputs);
+
+ IConnectableLayer* layer = m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
- armnn::Optional<armnn::ConstTensor> optionalBiases = armnn::EmptyOptional();
- if (descriptor.m_BiasEnabled)
- {
- biases = ToConstTensor(serializerLayer->biases());
- optionalBiases = armnn::Optional<armnn::ConstTensor>(biases);
- }
- IConnectableLayer* layer = m_Network->AddConvolution3dLayer(descriptor,
- weights,
- optionalBiases,
- layerName.c_str());
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnDeserializer/test/DeserializeConvolution3d.cpp b/src/armnnDeserializer/test/DeserializeConvolution3d.cpp
index 057ab6fbda..23fd811cdb 100644
--- a/src/armnnDeserializer/test/DeserializeConvolution3d.cpp
+++ b/src/armnnDeserializer/test/DeserializeConvolution3d.cpp
@@ -30,13 +30,11 @@ struct Convolution3dFixture : public ParserFlatbuffersSerializeFixture
base: {
layerName: "InputLayer",
layerType: "Input",
- inputSlots: [{
- index: 0,
- connection: {sourceLayerIndex:0, outputSlotIndex:0 },
- }],
+ inputSlots: [
+
+ ],
outputSlots: [
{
- index: 0,
tensorInfo: {
dimensions: )" + inputShape + R"(,
dataType: )" + dataType + R"(,
@@ -56,26 +54,19 @@ struct Convolution3dFixture : public ParserFlatbuffersSerializeFixture
}
},
{
- layer_type: "Convolution3dLayer",
+ layer_type: "ConstantLayer",
layer: {
base: {
index: 1,
- layerName: "convolution3d",
- layerType: "Convolution2d",
+ layerName: "Weights",
+ layerType: "Constant",
inputSlots: [
- {
- index: 0,
- connection: {
- sourceLayerIndex: 0,
- outputSlotIndex: 0
- }
- }
+
],
outputSlots: [
{
- index: 0,
tensorInfo: {
- dimensions: )" + outputShape + R"(,
+ dimensions: )" + weightsShape + R"(,
dataType: )" + dataType + R"(,
quantizationScale: 0.1,
dimensionSpecificity: [
@@ -89,12 +80,7 @@ struct Convolution3dFixture : public ParserFlatbuffersSerializeFixture
}
]
},
- descriptor: {
- strideX: 2,
- strideY: 2,
- strideZ: 2
- },
- weights: {
+ input: {
info: {
dimensions: )" + weightsShape + R"(,
dataType: )" + dataType + R"(,
@@ -127,29 +113,71 @@ struct Convolution3dFixture : public ParserFlatbuffersSerializeFixture
}
},
{
+ layer_type: "Convolution3dLayer",
+ layer: {
+ base: {
+ index: 2,
+ layerName: "convolution3d",
+ layerType: "Convolution3d",
+ inputSlots: [
+ {
+ connection: {
+ sourceLayerIndex: 0,
+ outputSlotIndex: 0
+ }
+ },
+ {
+ index: 1,
+ connection: {
+ sourceLayerIndex: 1,
+ outputSlotIndex: 0
+ }
+ }
+ ],
+ outputSlots: [
+ {
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(,
+ quantizationScale: 0.1,
+ dimensionSpecificity: [
+ true,
+ true,
+ true,
+ true,
+ true
+ ]
+ }
+ }
+ ]
+ },
+ descriptor: {
+ strideX: 2,
+ strideY: 2,
+ strideZ: 2
+ }
+ }
+ },
+ {
layer_type: "OutputLayer",
layer: {
base: {
layerBindingId: 2,
base: {
- index: 2,
+ index: 3,
layerName: "OutputLayer",
layerType: "Output",
inputSlots: [
{
connection: {
- sourceLayerIndex: 1,
+ sourceLayerIndex: 2,
outputSlotIndex: 0
}
}
],
- outputSlots: [{
- index: 0,
- tensorInfo: {
- dimensions: )" + outputShape + R"(,
- dataType: )" + dataType + R"(
- },
- }]
+ outputSlots: [
+
+ ]
}
}
}
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 77982888c8..c577a11a52 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -46,7 +46,8 @@ enum DataType : byte {
enum DataLayout : byte {
NHWC = 0,
NCHW = 1,
- NDHWC = 2
+ NDHWC = 2,
+ NCDHW = 3
}
enum ReduceOperation: byte {
@@ -287,8 +288,6 @@ table Convolution2dDescriptor {
table Convolution3dLayer {
base:LayerBase;
descriptor:Convolution3dDescriptor;
- weights:ConstTensor;
- biases:ConstTensor;
}
table Convolution3dDescriptor {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index 8234aa9c47..712ad28574 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -540,31 +540,34 @@ enum DataLayout {
DataLayout_NHWC = 0,
DataLayout_NCHW = 1,
DataLayout_NDHWC = 2,
+ DataLayout_NCDHW = 3,
DataLayout_MIN = DataLayout_NHWC,
- DataLayout_MAX = DataLayout_NDHWC
+ DataLayout_MAX = DataLayout_NCDHW
};
-inline const DataLayout (&EnumValuesDataLayout())[3] {
+inline const DataLayout (&EnumValuesDataLayout())[4] {
static const DataLayout values[] = {
DataLayout_NHWC,
DataLayout_NCHW,
- DataLayout_NDHWC
+ DataLayout_NDHWC,
+ DataLayout_NCDHW
};
return values;
}
inline const char * const *EnumNamesDataLayout() {
- static const char * const names[4] = {
+ static const char * const names[5] = {
"NHWC",
"NCHW",
"NDHWC",
+ "NCDHW",
nullptr
};
return names;
}
inline const char *EnumNameDataLayout(DataLayout e) {
- if (flatbuffers::IsOutRange(e, DataLayout_NHWC, DataLayout_NDHWC)) return "";
+ if (flatbuffers::IsOutRange(e, DataLayout_NHWC, DataLayout_NCDHW)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesDataLayout()[index];
}
@@ -3250,9 +3253,7 @@ struct Convolution3dLayer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef Convolution3dLayerBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_BASE = 4,
- VT_DESCRIPTOR = 6,
- VT_WEIGHTS = 8,
- VT_BIASES = 10
+ VT_DESCRIPTOR = 6
};
const armnnSerializer::LayerBase *base() const {
return GetPointer<const armnnSerializer::LayerBase *>(VT_BASE);
@@ -3260,22 +3261,12 @@ struct Convolution3dLayer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const armnnSerializer::Convolution3dDescriptor *descriptor() const {
return GetPointer<const armnnSerializer::Convolution3dDescriptor *>(VT_DESCRIPTOR);
}
- const armnnSerializer::ConstTensor *weights() const {
- return GetPointer<const armnnSerializer::ConstTensor *>(VT_WEIGHTS);
- }
- const armnnSerializer::ConstTensor *biases() const {
- return GetPointer<const armnnSerializer::ConstTensor *>(VT_BIASES);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_BASE) &&
verifier.VerifyTable(base()) &&
VerifyOffset(verifier, VT_DESCRIPTOR) &&
verifier.VerifyTable(descriptor()) &&
- VerifyOffset(verifier, VT_WEIGHTS) &&
- verifier.VerifyTable(weights()) &&
- VerifyOffset(verifier, VT_BIASES) &&
- verifier.VerifyTable(biases()) &&
verifier.EndTable();
}
};
@@ -3290,12 +3281,6 @@ struct Convolution3dLayerBuilder {
void add_descriptor(flatbuffers::Offset<armnnSerializer::Convolution3dDescriptor> descriptor) {
fbb_.AddOffset(Convolution3dLayer::VT_DESCRIPTOR, descriptor);
}
- void add_weights(flatbuffers::Offset<armnnSerializer::ConstTensor> weights) {
- fbb_.AddOffset(Convolution3dLayer::VT_WEIGHTS, weights);
- }
- void add_biases(flatbuffers::Offset<armnnSerializer::ConstTensor> biases) {
- fbb_.AddOffset(Convolution3dLayer::VT_BIASES, biases);
- }
explicit Convolution3dLayerBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -3311,12 +3296,8 @@ struct Convolution3dLayerBuilder {
inline flatbuffers::Offset<Convolution3dLayer> CreateConvolution3dLayer(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<armnnSerializer::LayerBase> base = 0,
- flatbuffers::Offset<armnnSerializer::Convolution3dDescriptor> descriptor = 0,
- flatbuffers::Offset<armnnSerializer::ConstTensor> weights = 0,
- flatbuffers::Offset<armnnSerializer::ConstTensor> biases = 0) {
+ flatbuffers::Offset<armnnSerializer::Convolution3dDescriptor> descriptor = 0) {
Convolution3dLayerBuilder builder_(_fbb);
- builder_.add_biases(biases);
- builder_.add_weights(weights);
builder_.add_descriptor(descriptor);
builder_.add_base(base);
return builder_.Finish();
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 7e1b74e10d..84a9d53b69 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -388,18 +388,15 @@ void SerializerStrategy::SerializeConvolution2dLayer(const armnn::IConnectableLa
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
}
-// Build FlatBuffer for Convolution2dLayer
+// Build FlatBuffer for Convolution3dLayer
void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
const armnn::Convolution3dDescriptor& descriptor,
- const std::vector<armnn::ConstTensor>& constants,
const char* name)
{
IgnoreUnused(name);
- const armnn::ConstTensor weights = constants[0];
-
// Create FlatBuffer BaseLayer
- auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
+ auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution3d);
auto flatBufferDescriptor = CreateConvolution3dDescriptor(m_flatBufferBuilder,
descriptor.m_PadLeft,
@@ -416,21 +413,11 @@ void SerializerStrategy::SerializeConvolution3dLayer(const armnn::IConnectableLa
descriptor.m_DilationZ,
descriptor.m_BiasEnabled,
GetFlatBufferDataLayout(descriptor.m_DataLayout));
- auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
- flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
-
- if (constants.size() > 1)
- {
- const armnn::ConstTensor biases = constants[1];
- flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases);
- }
- // Create the FlatBuffer Convolution2dLayer
+ // Create the FlatBuffer Convolution3dLayer
auto flatBufferLayer = CreateConvolution3dLayer(m_flatBufferBuilder,
flatBufferBaseLayer,
- flatBufferDescriptor,
- flatBufferWeightsConstTensorInfo,
- flatBufferBiasesConstTensorInfo);
+ flatBufferDescriptor);
// Add the AnyLayer to the FlatBufferLayers
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution3dLayer);
@@ -2038,7 +2025,6 @@ void SerializerStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer,
static_cast<const armnn::Convolution3dDescriptor&>(descriptor);
SerializeConvolution3dLayer(layer,
layerDescriptor,
- constants,
name);
break;
}
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 2f827ac059..1c0a9a619f 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -150,7 +150,6 @@ private:
void SerializeConvolution3dLayer(const armnn::IConnectableLayer* layer,
const armnn::Convolution3dDescriptor& descriptor,
- const std::vector<armnn::ConstTensor>& constants,
const char* name = nullptr);
void SerializeDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index fca6db8449..5ad27715c4 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -99,6 +99,8 @@ armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout
return armnnSerializer::DataLayout::DataLayout_NHWC;
case armnn::DataLayout::NDHWC:
return armnnSerializer::DataLayout::DataLayout_NDHWC;
+ case armnn::DataLayout::NCDHW:
+ return armnnSerializer::DataLayout::DataLayout_NCDHW;
case armnn::DataLayout::NCHW:
default:
return armnnSerializer::DataLayout::DataLayout_NCHW;
diff --git a/src/armnnSerializer/test/SerializerTestUtils.hpp b/src/armnnSerializer/test/SerializerTestUtils.hpp
index c6f148b1a1..ce4d2cc330 100644
--- a/src/armnnSerializer/test/SerializerTestUtils.hpp
+++ b/src/armnnSerializer/test/SerializerTestUtils.hpp
@@ -69,6 +69,7 @@ public:
{
case armnn::LayerType::Input: break;
case armnn::LayerType::Output: break;
+ case armnn::LayerType::Constant: break;
default:
{
VerifyNameAndConnections(layer, name);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index f2c9852607..2bffe0b9fd 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -472,25 +472,26 @@ TEST_CASE("SerializeConvolution3d")
armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
- armnn::IConnectableLayer* const convLayer =
- network->AddConvolution3dLayer(descriptor,
- weights,
- armnn::Optional<armnn::ConstTensor>(biases),
- layerName.c_str());
+ armnn::IConnectableLayer* const weightsLayer = network->AddConstantLayer(weights, "Weights");
+ armnn::IConnectableLayer* const biasesLayer = network->AddConstantLayer(biases, "Biases");
+ armnn::IConnectableLayer* const convLayer = network->AddConvolution3dLayer(descriptor, layerName.c_str());
armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+ weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
+ biasesLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+ biasesLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
CHECK(deserializedNetwork);
- const std::vector<armnn::ConstTensor>& constants {weights, biases};
- LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution3dDescriptor> verifier(
- layerName, {inputInfo}, {outputInfo}, descriptor, constants);
+ LayerVerifierBaseWithDescriptor<armnn::Convolution3dDescriptor> verifier(
+ layerName, {inputInfo, weightsInfo, biasesInfo}, {outputInfo}, descriptor);
deserializedNetwork->ExecuteStrategy(verifier);
}
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 81d491a1a1..7db5d85b13 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1099,36 +1099,29 @@ void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo);
- armnn::IConnectableLayer* layer = nullptr;
auto layerName = fmt::format("Conv3D:{}:{}", subgraphIndex, operatorIndex);
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ // Add the first input and weights tensor to the registration list.
+ // The constant weights will be added by SetupConstantLayers.
+ std::vector<unsigned int> tensorIndexesToRegister = {inputTensorIndexes[0], inputTensorIndexes[1]};
+
if (inputs.size() == 3)
{
desc.m_BiasEnabled = true;
- armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
- auto biasTensorAndData = CreateConstTensorNonPermuted(inputs[2], biasTensorInfo);
- layer = m_Network->AddConvolution3dLayer(desc,
- filterTensorAndData,
- Optional<ConstTensor>(biasTensorAndData),
- layerName.c_str());
- }
- else
- {
- layer = m_Network->AddConvolution3dLayer(desc,
- filterTensorAndData,
- EmptyOptional(),
- layerName.c_str());
+
+ // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
+ tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
}
+ armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Register the input connection slots for the layer, connections are made after all layers have been created
- // only the tensors for the inputs are relevant, exclude the const tensors
- auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
- RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, tensorIndexesToRegister);
layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
// Register the output connection slots for the layer, connections are made after all layers have been created
diff --git a/src/armnnUtils/DataLayoutIndexed.cpp b/src/armnnUtils/DataLayoutIndexed.cpp
index c1c98fc0fd..01505a0a31 100644
--- a/src/armnnUtils/DataLayoutIndexed.cpp
+++ b/src/armnnUtils/DataLayoutIndexed.cpp
@@ -31,6 +31,12 @@ DataLayoutIndexed::DataLayoutIndexed(armnn::DataLayout dataLayout)
m_WidthIndex = 3;
m_ChannelsIndex = 4;
break;
+ case armnn::DataLayout::NCDHW:
+ m_ChannelsIndex = 1;
+ m_DepthIndex = 2;
+ m_HeightIndex = 3;
+ m_WidthIndex = 4;
+ break;
default:
throw armnn::InvalidArgumentException("Unknown DataLayout value: " +
std::to_string(static_cast<int>(dataLayout)));
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 27b59ea3a6..2716c827af 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1320,7 +1320,12 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
{
const std::string descriptorName{"Convolution3dQueueDescriptor"};
- ValidateNumInputs(workloadInfo, descriptorName, 1);
+ uint32_t numInputs = 2;
+ if (m_Parameters.m_BiasEnabled)
+ {
+ numInputs = 3;
+ }
+ ValidateNumInputs(workloadInfo, descriptorName, numInputs);
ValidateNumOutputs(workloadInfo, descriptorName, 1);
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
@@ -1329,9 +1334,7 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
- ValidatePointer(m_Weight, descriptorName, "weight");
-
- const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
+ const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
@@ -1339,9 +1342,7 @@ void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
Optional<TensorInfo> optionalBiasTensorInfo;
if (m_Parameters.m_BiasEnabled)
{
- ValidatePointer(m_Bias, descriptorName, "bias");
-
- optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
+ optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 29d39d14a9..4e56aaf823 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -208,18 +208,9 @@ struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2
void Validate(const WorkloadInfo& workloadInfo) const;
};
-// Convolution 2D layer workload data.
+// Convolution 3D layer workload data.
struct Convolution3dQueueDescriptor : QueueDescriptorWithParameters<Convolution3dDescriptor>
{
- Convolution3dQueueDescriptor()
- : m_Weight(nullptr)
- , m_Bias(nullptr)
- {
- }
-
- const ConstTensorHandle* m_Weight;
- const ConstTensorHandle* m_Bias;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3b7f3a0f1f..55ce3554f9 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -250,7 +250,11 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
+
+ ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
+ "Convolution3dLayer: Weights should be connected as a Constant Layer.");
+ const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
+ dataType);
const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
@@ -258,14 +262,15 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
+ biases = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
+ GetBiasTypeFromWeightsType(dataType));
}
result = layerSupportObject.IsConvolution3dSupported(
input,
output,
descriptor,
- OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
+ weights,
biases,
reason);
break;
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index e3221c5ae4..b90407fd7c 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -13,6 +13,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
ChannelShuffleEndToEndTestImpl.hpp
ComparisonEndToEndTestImpl.hpp
CompatibilityTests.cpp
+ Convolution3dEndToEndTestImpl.hpp
CustomMemoryOptimizerStrategyTests.cpp
DefaultAsyncExecuteTest.cpp
DepthToSpaceEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..33bf9a180b
--- /dev/null
+++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
@@ -0,0 +1,167 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include "QuantizeHelper.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateConvolution3dNetwork(const armnn::Convolution3dDescriptor& descriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& weightsInfo,
+ const armnn::TensorInfo& biasInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ConstTensor& weights,
+ const armnn::ConstTensor& biases)
+{
+ using namespace armnn;
+
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* input = network->AddInputLayer(0, "input");
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+ armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+ IConnectableLayer* convolution3d = network->AddConvolution3dLayer(descriptor, "convolution3d");
+ IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+ Connect(input, convolution3d, inputInfo, 0, 0);
+ Connect(weightsLayer, convolution3d, weightsInfo, 0, 1);
+ Connect(biasLayer, convolution3d, biasInfo, 0, 2);
+ Connect(convolution3d, output, outputInfo, 0, 0);
+
+ return network;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void Convolution3dEndToEnd(const std::vector<armnn::BackendId>& backends,
+ armnn::DataLayout dataLayout)
+{
+ using namespace armnn;
+ using T = ResolveType<ArmnnType>;
+ using BT = ResolveType<ArmnnBType>;
+
+ const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
+ const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
+
+ TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset);
+ TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
+ TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
+
+ std::vector<float> inputData =
+ {
+ 0.0f, 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+ 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
+ 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f,
+ 25.0f, 26.0f, 27.0f, 28.0f, 29.0f,
+ 30.0f, 31.0f, 32.0f, 33.0f, 34.0f,
+ 35.0f, 36.0f, 37.0f, 38.0f, 39.0f,
+ 40.0f, 41.0f, 42.0f, 43.0f, 44.0f,
+
+ 45.0f, 46.0f, 47.0f, 48.0f, 49.0f,
+ 50.0f, 51.0f, 52.0f, 53.0f, 54.0f,
+ 55.0f, 56.0f, 57.0f, 58.0f, 59.0f,
+ 60.0f, 61.0f, 62.0f, 63.0f, 64.0f,
+ 65.0f, 66.0f, 67.0f, 68.0f, 69.0f,
+
+ 70.0f, 71.0f, 72.0f, 73.0f, 74.0f,
+ 75.0f, 76.0f, 77.0f, 78.0f, 79.0f,
+ 80.0f, 81.0f, 82.0f, 83.0f, 84.0f,
+ 85.0f, 86.0f, 87.0f, 88.0f, 89.0f,
+ 90.0f, 91.0f, 92.0f, 93.0f, 94.0f,
+ 95.0f, 96.0f, 97.0f, 98.0f, 99.0f,
+
+ 100.0f, 101.0f, 102.0f, 103.0f, 104.0f,
+ 105.0f, 106.0f, 107.0f, 108.0f, 109.0f,
+ 110.0f, 111.0f, 112.0f, 113.0f, 114.0f,
+ 115.0f, 116.0f, 117.0f, 118.0f, 119.0f,
+ 120.0f, 121.0f, 122.0f, 123.0f, 124.0f
+ };
+
+ std::vector<float> weightsData =
+ {
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+
+ 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f,
+
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f,
+ };
+
+ std::vector<float> biasesData = { 1.f };
+
+ std::vector<float> expectedOutputData =
+ {
+ 559.0f, 595.0f,
+
+ 739.0f, 775.0f,
+
+ 1459.0f, 1495.0f,
+
+ 1639.0f, 1675.0f,
+ };
+
+ Convolution3dDescriptor descriptor;
+ descriptor.m_PadLeft = 0;
+ descriptor.m_PadRight = 0;
+ descriptor.m_PadTop = 0;
+ descriptor.m_PadBottom = 0;
+ descriptor.m_PadFront = 0;
+ descriptor.m_PadBack = 0;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_StrideZ = 2;
+ descriptor.m_BiasEnabled = true;
+ descriptor.m_DataLayout = dataLayout;
+
+ // Permute input and output if NCDHW.
+ if (dataLayout == DataLayout::NCDHW)
+ {
+ PermuteTensorNdhwcToNcdhw(inputInfo, inputData);
+ PermuteTensorNdhwcToNcdhw(outputInfo, expectedOutputData);
+ }
+
+ // Quantize data
+ std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+ std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+
+ std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
+
+ ConstTensor weights(weightsInfo, qWeightsData);
+ ConstTensor biases(biasesInfo, qBiasesData);
+
+ INetworkPtr network = CreateConvolution3dNetwork(descriptor,
+ inputInfo,
+ weightsInfo,
+ biasesInfo,
+ outputInfo,
+ weights,
+ biases);
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+ { { 0, qInputData } },
+ { { 0, qExpectedOutputData } },
+ backends);
+}
diff --git a/src/backends/backendsCommon/test/DataLayoutUtils.hpp b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
index 9411212f4f..89b3900979 100644
--- a/src/backends/backendsCommon/test/DataLayoutUtils.hpp
+++ b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
@@ -34,3 +34,27 @@ void PermuteTensorNhwcToNchw(armnn::TensorInfo& tensorInfo, std::vector<T>& tens
tensorData = tmp;
}
+
+template<typename T>
+void PermuteTensorNdhwcToNcdhw(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+ const armnn::PermutationVector ndhwcToNcdhw = { 0, 2, 3, 4, 1 };
+
+ tensorInfo = armnnUtils::Permuted(tensorInfo, ndhwcToNcdhw);
+
+ std::vector<T> tmp(tensorData.size());
+ armnnUtils::Permute(tensorInfo.GetShape(), ndhwcToNcdhw, tensorData.data(), tmp.data(), sizeof(T));
+ tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNcdhwToNdhwc(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+ const armnn::PermutationVector ncdhwToNdhwc = { 0, 4, 1, 2, 3 };
+
+ tensorInfo = armnnUtils::Permuted(tensorInfo, ncdhwToNdhwc);
+
+ std::vector<T> tmp(tensorData.size());
+ armnnUtils::Permute(tensorInfo.GetShape(), ncdhwToNdhwc, tensorData.data(), tmp.data(), sizeof(T));
+ tensorData = tmp;
+}
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index c3a6aa1a3c..f9bdfde622 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -407,7 +407,7 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
}
catch (const LayerValidationException& exc)
{
- CHECK(strcmp(exc.what(), "FullyConnected layer weights not set: Input slot(s) 1 not connected "
+ CHECK(strcmp(exc.what(), "Fully_Connected layer weights not set: Input slot(s) 1 not connected "
"to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
}
}
@@ -434,7 +434,7 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
}
catch (const LayerValidationException& exc)
{
- CHECK(strcmp(exc.what(), "FullyConnected layer bias not set: Input slot(s) 2 not connected "
+ CHECK(strcmp(exc.what(), "Fully_Connected layer bias not set: Input slot(s) 2 not connected "
"to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
}
}
@@ -457,7 +457,7 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
}
catch (const LayerValidationException& exc)
{
- CHECK(strcmp(exc.what(), "FullyConnected layer weights and bias not set: Input slot(s) 1 & 2 not "
+ CHECK(strcmp(exc.what(), "Fully_Connected layer weights and bias not set: Input slot(s) 1 & 2 not "
"connected to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
}
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index 259272d996..1406ab039b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/TensorHandle.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -228,23 +229,20 @@ LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset());
}
+ // Permute input and output if data layout is NCDHW.
+ if (dataLayout == armnn::DataLayout::NCDHW)
+ {
+ PermuteTensorNdhwcToNcdhw(inputTensorInfo, inputData);
+ PermuteTensorNdhwcToNcdhw(outputTensorInfo, outputData);
+ }
+
std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
- std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
- armnn::ScopedTensorHandle biasTensor(biasDesc);
- if (biasEnabled)
- {
- AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
- }
-
armnn::Convolution3dQueueDescriptor data;
- data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_StrideZ = strideZ;
@@ -261,14 +259,29 @@ LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
data.m_Parameters.m_BiasEnabled = biasEnabled;
armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
+ AddInputToWorkload(data, info, kernelDesc, input1Handle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+ std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
+ if (biasEnabled)
+ {
+ input2Handle = tensorHandleFactory.CreateTensorHandle(biasDesc);
+ AddInputToWorkload(data, info, biasDesc, input2Handle.get());
+ }
+
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution3d(data, info);
- inputHandle->Allocate();
+ input0Handle->Allocate();
+ input1Handle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+ CopyDataToITensorHandle(input0Handle.get(), inputData.data());
+ CopyDataToITensorHandle(input1Handle.get(), kernel.data());
+ if (biasEnabled)
+ {
+ input2Handle->Allocate();
+ CopyDataToITensorHandle(input2Handle.get(), bias.data());
+ }
ExecuteWorkload(*workload, memoryManager);
@@ -840,40 +853,44 @@ LayerTestResult<float, 5> SimpleConvolution3d3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> SimpleConvolution3d3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return SimpleConvolution3d3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
@@ -881,158 +898,174 @@ LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> Convolution3d2x2x2Strides3x5x5Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Strides3x5x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Dilation2x2x2TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int8_t, 5> Convolution3dPaddingSame3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dPaddingSame3x3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3dStrideDilationPadding3x3x3TestCommonFloat32(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2Stride3x3x3SmallTestCommonFloat32(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x3x3TestCommonFloat16(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled)
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
{
return Convolution3d2x2x2SmallTestCommonFloat16(
- workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, armnn::DataLayout::NDHWC);
+ workloadFactory, memoryManager, tensorHandleFactory, biasEnabled, dataLayout);
}
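
Note: each wrapper above now forwards a caller-chosen layout instead of hard-coding NDHWC, so the same test body can be driven for both layouts. A hypothetical harness call (workloadFactory, memoryManager and tensorHandleFactory are assumed to come from the test fixture, as elsewhere in these files):

    // Hypothetical usage sketch; the trailing argument selects the data layout.
    LayerTestResult<float, 5> ndhwc = SimpleConvolution3d3x3x3Float32Test(
        workloadFactory, memoryManager, tensorHandleFactory, /*biasEnabled=*/false, armnn::DataLayout::NDHWC);
    LayerTestResult<float, 5> ncdhw = SimpleConvolution3d3x3x3Float32Test(
        workloadFactory, memoryManager, tensorHandleFactory, /*biasEnabled=*/false, armnn::DataLayout::NCDHW);
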
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
index a07c183c76..c612e19c9b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.hpp
@@ -24,118 +24,138 @@ LayerTestResult<float, 5> SimpleConvolution3d3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> SimpleConvolution3d3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> SimpleConvolution3d3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> SimpleConvolution3d3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3d2x2x2Strides3x5x5Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> Convolution3d2x2x2Strides3x5x5Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Strides3x5x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> Convolution3d2x2x2Strides3x5x5Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3d2x2x2Dilation2x2x2Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> Convolution3d2x2x2Dilation2x2x2Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> Convolution3d2x2x2Dilation2x2x2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> Convolution3d2x2x2Dilation2x2x2Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3dPaddingSame3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int8_t , 5> Convolution3dPaddingSame3x3x3Int8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<uint8_t, 5> Convolution3dPaddingSame3x3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<int16_t, 5> Convolution3dPaddingSame3x3x3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3dStrideDilationPadding3x3x3Float32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<float, 5> Convolution3d2x2x2Stride3x3x3SmallFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<armnn::Half, 5> Convolution3d2x3x3Float16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
LayerTestResult<armnn::Half, 5> Convolution3d2x2x2SmallFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- bool biasEnabled);
+ bool biasEnabled,
+ armnn::DataLayout dataLayout);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 0cc8f4aa10..dc4dcecd81 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -11,6 +11,7 @@
#include <backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
+#include <backendsCommon/test/Convolution3dEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
@@ -566,6 +567,36 @@ TEST_CASE("RefConcatEndToEndDim3Uint8Test")
ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
+TEST_CASE("RefConvolution3dFloat32Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dNcdhwFloat32Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
+ armnn::DataLayout::NCDHW);
+}
+
+TEST_CASE("RefConvolution3dFloat16Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::Float16, armnn::DataType::Float16>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dUint8Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
+TEST_CASE("RefConvolution3dInt8Test")
+{
+ Convolution3dEndToEnd<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(defaultBackends,
+ armnn::DataLayout::NDHWC);
+}
+
TEST_CASE("RefEluEndToEndTestFloat32")
{
EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
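
Note on the new end-to-end cases: with ConstTensorsAsInput, the weights and bias are added as Constant layers and connected to input slots 1 and 2 of the Convolution3d layer. A minimal sketch of such a graph (not the exact Convolution3dEndToEndTestImpl.hpp code; shapes, values and the D,H,W,I,O weight ordering are illustrative only):

    #include <armnn/INetwork.hpp>
    #include <vector>

    using namespace armnn;

    // Illustrative shapes: 1x5x5x5x1 NDHWC input, 3x3x3 kernel, stride 1, no padding -> 1x3x3x3x1 output.
    TensorInfo inputInfo  (TensorShape({1, 5, 5, 5, 1}), DataType::Float32);
    TensorInfo weightsInfo(TensorShape({3, 3, 3, 1, 1}), DataType::Float32);
    TensorInfo biasInfo   (TensorShape({1}), DataType::Float32);
    TensorInfo outputInfo (TensorShape({1, 3, 3, 3, 1}), DataType::Float32);

    std::vector<float> weightsData(weightsInfo.GetNumElements(), 1.0f);
    std::vector<float> biasData(biasInfo.GetNumElements(), 0.0f);
    ConstTensor weights(weightsInfo, weightsData);
    ConstTensor bias(biasInfo, biasData);

    Convolution3dDescriptor desc;       // strides/dilations default to 1, padding to 0
    desc.m_BiasEnabled = true;
    desc.m_DataLayout  = DataLayout::NDHWC;

    INetworkPtr net = INetwork::Create();
    IConnectableLayer* inputLayer   = net->AddInputLayer(0, "input");
    IConnectableLayer* weightsLayer = net->AddConstantLayer(weights, "weights");
    IConnectableLayer* biasLayer    = net->AddConstantLayer(bias, "bias");
    IConnectableLayer* convLayer    = net->AddConvolution3dLayer(desc, "conv3d");
    IConnectableLayer* outputLayer  = net->AddOutputLayer(0, "output");

    // Weights and bias feed the extra input slots rather than descriptor members.
    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
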
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index f5d388d007..cb31b37161 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -208,37 +208,119 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue,
false,
DataLayout::NHWC);
-// Convolution 3d
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, SimpleConvolution3d3x3x3Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8, SimpleConvolution3d3x3x3Int8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8, SimpleConvolution3d3x3x3Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16, SimpleConvolution3d3x3x3Int16Test, false)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32, Convolution3d2x2x2Strides3x5x5Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8, Convolution3d2x2x2Strides3x5x5Int8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8, Convolution3d2x2x2Strides3x5x5Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16, Convolution3d2x2x2Strides3x5x5Int16Test, true)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32, Convolution3dPaddingSame3x3x3Float32Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8, Convolution3dPaddingSame3x3x3Int8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8, Convolution3dPaddingSame3x3x3Uint8Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16, Convolution3dPaddingSame3x3x3Int16Test, false)
-
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32, Convolution3d2x2x2Dilation2x2x2Float32Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8, Convolution3d2x2x2Dilation2x2x2Int8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8, Convolution3d2x2x2Dilation2x2x2Uint8Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16, Convolution3d2x2x2Dilation2x2x2Int16Test, true)
+// Convolution 3d - NDHWC
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32,
+ SimpleConvolution3d3x3x3Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int8,
+ SimpleConvolution3d3x3x3Int8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Uint8,
+ SimpleConvolution3d3x3x3Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Int16,
+ SimpleConvolution3d3x3x3Int16Test,
+ false,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5Float32,
+ Convolution3d2x2x2Strides3x5x5Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt8,
+ Convolution3d2x2x2Strides3x5x5Int8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestUint8,
+ Convolution3d2x2x2Strides3x5x5Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5TestInt16,
+ Convolution3d2x2x2Strides3x5x5Int16Test,
+ true,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3Float32,
+ Convolution3dPaddingSame3x3x3Float32Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt8,
+ Convolution3dPaddingSame3x3x3Int8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestUint8,
+ Convolution3dPaddingSame3x3x3Uint8Test,
+ false,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3TestInt16,
+ Convolution3dPaddingSame3x3x3Int16Test,
+ false,
+ DataLayout::NDHWC)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2Float32,
+ Convolution3d2x2x2Dilation2x2x2Float32Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt8,
+ Convolution3d2x2x2Dilation2x2x2Int8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestUint8,
+ Convolution3d2x2x2Dilation2x2x2Uint8Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2TestInt16,
+ Convolution3d2x2x2Dilation2x2x2Int16Test,
+ true,
+ DataLayout::NDHWC)
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dStrideDilationPadding3x3x3Float32,
Convolution3dStrideDilationPadding3x3x3Float32Test,
- true)
+ true,
+ DataLayout::NDHWC)
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Stride3x3x3SmallTestFloat32,
Convolution3d2x2x2Stride3x3x3SmallFloat32Test,
- false)
+ false,
+ DataLayout::NDHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16, Convolution3d2x3x3Float16Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16, Convolution3d2x2x2SmallFloat16Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestFloat16,
+ Convolution3d2x3x3Float16Test,
+ true,
+ DataLayout::NDHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2SmallTestFloat16,
+ Convolution3d2x2x2SmallFloat16Test,
+ false,
+ DataLayout::NDHWC)
+
+// Convolution 3d - NCDHW
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3NcdhwFloat32,
+ SimpleConvolution3d3x3x3Float32Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x3x3TestNcdhwFloat16,
+ Convolution3d2x3x3Float16Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Strides3x5x5NcdhwTestInt8,
+ Convolution3d2x2x2Strides3x5x5Int8Test,
+ true,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3dPaddingSame3x3x3NcdhwTestUint8,
+ Convolution3dPaddingSame3x3x3Uint8Test,
+ false,
+ DataLayout::NCDHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution3d2x2x2Dilation2x2x2NcdhwTestInt16,
+ Convolution3d2x2x2Dilation2x2x2Int16Test,
+ true,
+ DataLayout::NCDHW)
// Depthwise Convolution
diff --git a/src/backends/reference/workloads/Conv3dImpl.cpp b/src/backends/reference/workloads/Conv3dImpl.cpp
index 484d887cfc..1c06d624a8 100644
--- a/src/backends/reference/workloads/Conv3dImpl.cpp
+++ b/src/backends/reference/workloads/Conv3dImpl.cpp
@@ -113,11 +113,25 @@ void Convolve3d(const TensorShape& rInputShape,
// Keep this implementation, as using DataLayoutIndexed::GetIndex
// causes large performance regression.
- inputIndex = batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
- (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
- (yInput-paddingTop) * inputWidth * inChannels +
- (xInput-paddingLeft) * inChannels +
- cInput;
+ if (dataLayoutIndexed.GetDataLayout() == DataLayout::NDHWC)
+ {
+ inputIndex =
+ batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ (zInput-paddingFront) * inputHeight * inputWidth * inChannels +
+ (yInput-paddingTop) * inputWidth * inChannels +
+ (xInput-paddingLeft) * inChannels +
+ cInput;
+ }
+ else
+ {
+ // NCDHW DataLayout
+ inputIndex =
+ batchIdx * inputDepth * inputHeight * inputWidth * inChannels +
+ inputDepth * inputHeight * inputWidth * cInput +
+ (zInput-paddingFront) * inputHeight * inputWidth +
+ (yInput-paddingTop) * inputWidth +
+ xInput-paddingLeft;
+ }
inputValue = inputVec[inputIndex];
}
@@ -133,11 +147,24 @@ void Convolve3d(const TensorShape& rInputShape,
sum += biasVec[cOutput];
}
- unsigned int outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
- zOutput * outputHeight * outputWidth * outChannels +
- yOutput * outputWidth * outChannels +
- xOutput * outChannels +
- cOutput;
+ unsigned int outIdx;
+ if (dataLayoutIndexed.GetDataLayout() == DataLayout::NDHWC)
+ {
+ outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ zOutput * outputHeight * outputWidth * outChannels +
+ yOutput * outputWidth * outChannels +
+ xOutput * outChannels +
+ cOutput;
+ }
+ else
+ {
+ // NCDHW DataLayout
+ outIdx = batchIdx * outputDepth * outputHeight * outputWidth * outChannels +
+ cOutput * outputDepth * outputHeight * outputWidth +
+ zOutput * outputHeight * outputWidth +
+ yOutput * outputWidth +
+ xOutput;
+ }
rOutputEncoder[outIdx];
rOutputEncoder.Set(sum);
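
Note: the two branches above differ only in how the 5-D coordinate is flattened; the real code additionally subtracts paddingFront/Top/Left before indexing. A standalone sanity check of the two formulas with arbitrary example values:

    // NDHWC vs NCDHW flat-index formulas, checked against their nested forms.
    #include <cassert>

    int main()
    {
        const unsigned int C = 2, D = 3, H = 4, W = 5;        // channels, depth, height, width
        const unsigned int n = 0, c = 1, d = 2, h = 1, w = 3;  // an arbitrary coordinate

        // NDHWC: channel is the fastest-moving dimension.
        const unsigned int ndhwc = n*D*H*W*C + d*H*W*C + h*W*C + w*C + c;
        // NCDHW: width is the fastest-moving dimension; channel stride is D*H*W.
        const unsigned int ncdhw = n*C*D*H*W + c*D*H*W + d*H*W + h*W + w;

        assert(ndhwc == ((((n*D + d)*H + h)*W + w)*C + c));   // 97
        assert(ncdhw == ((((n*C + c)*D + d)*H + h)*W + w));   // 108
        return 0;
    }
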
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index ea425daec9..afab88f0a8 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -19,10 +19,10 @@ RefConvolution3dWorkload::RefConvolution3dWorkload(
WorkloadInfo detailsInfo;
detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
- detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
+ detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
if (descriptor.m_Parameters.m_BiasEnabled)
{
- detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
+ detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
}
// Report Profiling Details
@@ -30,18 +30,25 @@ RefConvolution3dWorkload::RefConvolution3dWorkload(
descriptor.m_Parameters,
detailsInfo,
this->GetGuid());
+}
- m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
- const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
+void RefConvolution3dWorkload::PostAllocationConfigure()
+{
+ PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+void RefConvolution3dWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
+ std::vector<ITensorHandle*> outputs)
+{
+ IgnoreUnused(outputs);
+ const TensorInfo& rFilterInfo = GetTensorInfo(inputs[1]);
m_FilterShape = rFilterInfo.GetShape();
- m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight.get()->Map(true));
+ m_FilterDecoder = MakeDecoder<float>(rFilterInfo);
- if ( descriptor.m_Parameters.m_BiasEnabled )
+ if (m_Data.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Bias ));
- const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
- m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
+ const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
+ m_BiasDecoder = MakeDecoder<float>(biasInfo);
}
}
@@ -52,6 +59,8 @@ void RefConvolution3dWorkload::Execute() const
void RefConvolution3dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
+ PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+
Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}
@@ -65,6 +74,12 @@ void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::
const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
+ m_FilterDecoder->Reset(inputs[1]->Map());
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ m_BiasDecoder->Reset(inputs[2]->Map());
+ }
+
Convolve3d(inputShape, *inputDecoder, outputShape, *outputEncoder, m_FilterShape,
*m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
m_Data.m_Parameters.m_DataLayout,
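
Note: since the weights and bias now arrive as graph inputs, the decoders are created from the input TensorInfos alone in PostAllocationConfigure and re-bound to the currently mapped handle data on every Execute/ExecuteAsync. A minimal sketch of that configure-once, rebind-per-run pattern (simplified stand-in types, not the actual Decoder/ITensorHandle interfaces):

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct FloatDecoder
    {
        void Reset(const float* data) { m_Data = data; }       // bind the current buffer
        float Get(std::size_t i) const { return m_Data[i]; }
        const float* m_Data = nullptr;
    };

    struct Conv3dLikeWorkload
    {
        void PostAllocationConfigure()                          // shapes known, no data yet
        {
            m_FilterDecoder = std::make_unique<FloatDecoder>();
        }

        float Execute(const std::vector<float>& mappedWeights)  // e.g. memory backing input[1]
        {
            m_FilterDecoder->Reset(mappedWeights.data());
            float sum = 0.0f;
            for (std::size_t i = 0; i < mappedWeights.size(); ++i)
            {
                sum += m_FilterDecoder->Get(i);                 // read through the decoder
            }
            return sum;
        }

        std::unique_ptr<FloatDecoder> m_FilterDecoder;
    };
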
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
index 0373a8b900..4d97512095 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
@@ -19,14 +19,14 @@ public:
explicit RefConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info);
+ void PostAllocationConfigure() override;
void Execute() const override;
void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
private:
+ void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedTensorHandle> m_Weight;
- std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_FilterDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;