diff options
author | Matteo Martincigh <matteo.martincigh@arm.com> | 2018-12-18 09:26:39 +0000 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-01-04 17:28:07 +0000 |
commit | 747ef82c88f9afe14a8b80b6b3b34118353e97f2 (patch) | |
tree | a29ac33b84fb96a41103a0a97327189495374cc9 /src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp | |
parent | 760892724d131c7da4b9baad05cddd49276ad6bb (diff) | |
download | armnn-747ef82c88f9afe14a8b80b6b3b34118353e97f2.tar.gz |
MLCE-77 Depthwise Convolution with depth multiplier > 1 doesn't work
* Unified ArmNN's weight format to [ M, I, H, W ] for the depthwise convolution
* Added conversion utilities to permute/reshape the weights as appropriate
when using CL and Neon backends
* Updated the reference implementation of the convolution
* Updated the relevant unit tests accordingly
!android-nn-driver:459
Change-Id: I07d0818efa9d1ca1e5dad82983aac1fe78eadb18
Diffstat (limited to 'src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp')
-rw-r--r-- | src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp | 7 |
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp
index e8e501d6ae..629b729ea6 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dUint8Workload.cpp
@@ -28,10 +28,7 @@ void RefDepthwiseConvolution2dUint8Workload::Execute() const
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     const uint8_t* weightsData = m_Weight->template GetConstTensor<uint8_t>();
     const TensorInfo& weightsInfo = GetTensorInfo(m_Weight.get());
-    const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ?
-        m_Bias->template GetConstTensor<int32_t>() :
-        nullptr;
-    uint8_t* outputData = GetOutputTensorDataU8(0, m_Data);
+    const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
+
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
@@ -40,7 +37,7 @@ void RefDepthwiseConvolution2dUint8Workload::Execute() const
         inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
         weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
         biasData,
-        outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
+        outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
 }

 } //namespace armnn