diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-04-14 17:55:11 +0100 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-05-05 16:10:06 +0000 |
commit | 0690265d83e5aa79bd174544a7b35330781619dd (patch) | |
tree | 2cb825017ee202ebcfa9c8428271a4dccaed72a4 /src/backends/reference/workloads | |
parent | 3a3a6bfaedc64fac3644c8fe88dbfc3947e2b3ab (diff) | |
download | armnn-0690265d83e5aa79bd174544a7b35330781619dd.tar.gz |
IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d
!android-nn-driver:7418
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Change NNDriver to new API.
* Updated Ref.
* Neon and Cl backend partially completed (Backend.cpp files).
* Added dynamic or constant input EndToEnd tests.
* Added ConstantTensorAsInputMemberVariableRedirect Optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r-- | src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp | 47 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp | 2 |
2 files changed, 42 insertions, 7 deletions
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp index ad5edde7e6..c1c3916292 100644 --- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp +++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp @@ -19,16 +19,41 @@ RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload( const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) : RefBaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info) { - m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight)); - const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo(); - m_FilterShape = rFilterInfo.GetShape(); - m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight->Map(true)); + WorkloadInfo detailsInfo; + detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos; + detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos; + detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]); if (descriptor.m_Parameters.m_BiasEnabled) { - m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias)); - const TensorInfo& biasInfo = m_Bias->GetTensorInfo(); - m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true)); + detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]); + } + + // Report Profiling Details + ARMNN_REPORT_PROFILING_WORKLOAD_DESC("RefDepthwiseConvolution2dWorkload_Construct", + descriptor.m_Parameters, + detailsInfo, + this->GetGuid()); +} + +void RefDepthwiseConvolution2dWorkload::PostAllocationConfigure() +{ + PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs); +} + +void RefDepthwiseConvolution2dWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs, + std::vector<ITensorHandle*> outputs) +{ + IgnoreUnused(outputs); + + const TensorInfo& rFilterInfo = GetTensorInfo(inputs[1]); + 
m_FilterShape = rFilterInfo.GetShape(); + m_FilterDecoder = MakeDecoder<float>(rFilterInfo); + + if (m_Data.m_Parameters.m_BiasEnabled) + { + const TensorInfo& biasInfo = GetTensorInfo(inputs[2]); + m_BiasDecoder = MakeDecoder<float>(biasInfo); } } @@ -39,6 +64,8 @@ void RefDepthwiseConvolution2dWorkload::Execute() const void RefDepthwiseConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) { + PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs); + Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs); } @@ -54,6 +81,12 @@ void RefDepthwiseConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inpu const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape(); const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape(); + m_FilterDecoder->Reset(inputs[1]->Map()); + if (m_Data.m_Parameters.m_BiasEnabled) + { + m_BiasDecoder->Reset(inputs[2]->Map()); + } + Convolve(inputShape, *inputDecoder, outputShape, *OutputEncoder, m_FilterShape, *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(), m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft, diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp index 5d4b483fa7..1c7de29b37 100644 --- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp +++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp @@ -17,11 +17,13 @@ public: explicit RefDepthwiseConvolution2dWorkload(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info); + void PostAllocationConfigure() override; void Execute() const override; void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override; private: + void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs); void 
Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const; std::unique_ptr <ScopedTensorHandle> m_Weight; |