diff options
author | surmeh01 <surabhi.mehta@arm.com> | 2018-05-18 16:31:43 +0100 |
---|---|---|
committer | telsoa01 <telmo.soares@arm.com> | 2018-05-23 13:09:07 +0100 |
commit | 3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0 (patch) | |
tree | 5950603ad78ec3fe56fb31ddc7f4d52a19f5bc60 /src/armnn/layers/Convolution2dLayer.cpp | |
parent | bceff2fb3fc68bb0aa88b886900c34b77340c826 (diff) | |
download | armnn-3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0.tar.gz |
Release 18.05
Diffstat (limited to 'src/armnn/layers/Convolution2dLayer.cpp')
-rw-r--r-- | src/armnn/layers/Convolution2dLayer.cpp | 83 |
1 file changed, 83 insertions, 0 deletions
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp new file mode 100644 index 0000000000..3829f129bb --- /dev/null +++ b/src/armnn/layers/Convolution2dLayer.cpp @@ -0,0 +1,83 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "Convolution2dLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <armnn/TypesUtils.hpp> +#include <backends/CpuTensorHandle.hpp> +#include <backends/WorkloadFactory.hpp> + +namespace armnn +{ + +Convolution2dLayer::Convolution2dLayer(const Convolution2dDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Convolution2d, param, name) +{ +} + +std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + Convolution2dQueueDescriptor descriptor; + + descriptor.m_Weight = m_Weight.get(); + if (m_Param.m_BiasEnabled) + { + descriptor.m_Bias = m_Bias.get(); + } + return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const +{ + auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName()); + layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr; + + if (layer->m_Param.m_BiasEnabled) + { + layer->m_Bias = m_Bias ? 
std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr; + } + + return std::move(layer); +} + +void Convolution2dLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "Convolution2dLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "Convolution2dLayer: TensorInfo must be set on connected OutputSlot."); + + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + const TensorShape& inputShape = input->GetTensorInfo().GetShape(); + const TensorShape filterShape = m_Weight->GetTensorInfo().GetShape(); + + // If we support multiple batch dimensions in the future, then this assert will need to change. + BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input."); + + unsigned int inWidth = inputShape[3]; + unsigned int inHeight = inputShape[2]; + unsigned int inBatchSize = inputShape[0]; + + unsigned int filterWidth = filterShape[3]; + unsigned int readWidth = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - (filterWidth); + unsigned int outWidth = 1+(readWidth / m_Param.m_StrideX); + + unsigned int filterHeight = filterShape[2]; + unsigned int readHeight = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - (filterHeight); + unsigned int outHeight = 1+(readHeight / m_Param.m_StrideY); + + unsigned int outChannels = filterShape[0]; + unsigned int outBatchSize = inBatchSize; + + TensorShape shapeOut({outBatchSize, outChannels, outHeight, outWidth}); + ConditionalThrowIfNotEqual<LayerValidationException>( + "Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + shapeOut); +} + +} // namespace armnn |