diff options
author | nikraj01 <nikhil.raj@arm.com> | 2019-05-29 10:51:05 +0100 |
---|---|---|
committer | nikraj01 <nikhil.raj@arm.com> | 2019-05-29 10:51:05 +0100 |
commit | a121de3edee75180d335d8be42cdd6620a6063a5 (patch) | |
tree | dc766736db9eedad905f4f2e566a5d0e476d6240 /src/backends/reference/workloads | |
parent | e242f2dc646f41e9162aaaf74e057ce39fcb92df (diff) | |
download | armnn-a121de3edee75180d335d8be42cdd6620a6063a5.tar.gz |
IVGCVSW-3168 Refactor reference softmax workload into a single workload
Change-Id: Ie290efcbb9e3a6365cbd630cb2041e7b0f542505
Signed-off-by: nikraj01 <nikhil.raj@arm.com>
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r-- | src/backends/reference/workloads/CMakeLists.txt | 6 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp | 26 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp | 21 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp | 36 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxWorkload.cpp | 39 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefSoftmaxWorkload.hpp (renamed from src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp) | 4 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefWorkloads.hpp | 3 | ||||
-rw-r--r-- | src/backends/reference/workloads/Softmax.cpp | 19 | ||||
-rw-r--r-- | src/backends/reference/workloads/Softmax.hpp | 3 |
9 files changed, 58 insertions, 99 deletions
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 3db0314346..e2f93d72a9 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -99,10 +99,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefResizeBilinearUint8Workload.hpp RefRsqrtFloat32Workload.cpp RefRsqrtFloat32Workload.hpp - RefSoftmaxFloat32Workload.cpp - RefSoftmaxFloat32Workload.hpp - RefSoftmaxUint8Workload.cpp - RefSoftmaxUint8Workload.hpp + RefSoftmaxWorkload.cpp + RefSoftmaxWorkload.hpp RefSpaceToBatchNdWorkload.cpp RefSpaceToBatchNdWorkload.hpp RefSplitterFloat32Workload.cpp diff --git a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp b/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp deleted file mode 100644 index 1f519bda10..0000000000 --- a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefSoftmaxFloat32Workload.hpp" - -#include "RefWorkloadUtils.hpp" -#include "Softmax.hpp" - -#include "Profiling.hpp" - -namespace armnn -{ - -void RefSoftmaxFloat32Workload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxFloat32Workload_Execute"); - - Softmax(GetInputTensorDataFloat(0, m_Data), - GetOutputTensorDataFloat(0, m_Data), - GetTensorInfo(m_Data.m_Inputs[0]), - m_Data.m_Parameters.m_Beta); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp b/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp deleted file mode 100644 index 82ddfac303..0000000000 --- a/src/backends/reference/workloads/RefSoftmaxFloat32Workload.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <backendsCommon/Workload.hpp> -#include <backendsCommon/WorkloadData.hpp> - -namespace armnn -{ - -class RefSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor> -{ -public: - using Float32Workload<SoftmaxQueueDescriptor>::Float32Workload; - virtual void Execute() const override; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp b/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp deleted file mode 100644 index 17114ec83a..0000000000 --- a/src/backends/reference/workloads/RefSoftmaxUint8Workload.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefSoftmaxUint8Workload.hpp" - -#include "RefWorkloadUtils.hpp" -#include "Softmax.hpp" - -#include "Profiling.hpp" - -#include <vector> - -namespace armnn -{ - -void RefSoftmaxUint8Workload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxUint8Workload_Execute"); - - const TensorInfo& tensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); - - auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), tensorInfo); - - std::vector<float> results(tensorInfo.GetNumElements()); - - Softmax(dequant.data(), - results.data(), - tensorInfo, - m_Data.m_Parameters.m_Beta); - - Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), GetTensorInfo(m_Data.m_Outputs[0])); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp new file mode 100644 index 0000000000..b17666738f --- /dev/null +++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefSoftmaxWorkload.hpp" + +#include "Decoders.hpp" +#include "Encoders.hpp" +#include "RefWorkloadUtils.hpp" +#include "Softmax.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefSoftmaxWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxWorkload_Execute"); + + const TensorInfo &inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); + + std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map()); + Decoder<float> &decoder = *decoderPtr; + + const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map()); + Encoder<float> &encoder = *encoderPtr; + + Softmax(decoder, + encoder, + inputTensorInfo, + m_Data.m_Parameters.m_Beta); +} +} //namespace armnn diff --git a/src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp index bb7b2143c1..cf3623bf40 100644 --- a/src/backends/reference/workloads/RefSoftmaxUint8Workload.hpp +++ b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp @@ -11,10 +11,10 @@ namespace armnn { -class RefSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor> +class RefSoftmaxWorkload : public BaseWorkload<SoftmaxQueueDescriptor> { public: - using Uint8Workload<SoftmaxQueueDescriptor>::Uint8Workload; + using BaseWorkload<SoftmaxQueueDescriptor>::BaseWorkload; virtual void Execute() const override; }; diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 6ffec2bd06..ab3da88437 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -27,8 +27,7 @@ #include "FullyConnected.hpp" #include "Gather.hpp" #include "RefFloorFloat32Workload.hpp" -#include "RefSoftmaxFloat32Workload.hpp"
-#include "RefSoftmaxUint8Workload.hpp" +#include "RefSoftmaxWorkload.hpp" #include "RefResizeBilinearFloat32Workload.hpp" #include "RefBatchNormalizationUint8Workload.hpp" #include "ResizeBilinear.hpp" diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp index 4f1016e86c..6cb219a6cc 100644 --- a/src/backends/reference/workloads/Softmax.cpp +++ b/src/backends/reference/workloads/Softmax.cpp @@ -12,16 +12,19 @@ namespace armnn { /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo. -void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta) +void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta) { - unsigned int numChannels = tensorInfo.GetShape()[1]; - for (unsigned int n = 0; n < tensorInfo.GetShape()[0]; n++) + unsigned int numChannels = inputTensorInfo.GetShape()[1]; + + for (unsigned int n = 0; n < inputTensorInfo.GetShape()[0]; n++) { // Find maximum channel. - float max = in[n * numChannels]; + in[n * numChannels]; + float max = in.Get(); for (unsigned int c = 1; c < numChannels; c++) { - float val = in[n * numChannels + c]; + in[n * numChannels + c]; + float val = in.Get(); if (val > max) { max = val; @@ -33,7 +36,8 @@ void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float be float sum = 0.0f; for (unsigned int c = 0; c < numChannels; c++) { - float val = in[n * numChannels + c]; + in[n * numChannels + c]; + float val = in.Get(); exponentials[c] = expf((val - max) * beta); sum += exponentials[c]; } @@ -41,7 +45,8 @@ void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float be // Divide exponentials by sum to give outputs.
for (unsigned int c = 0; c < numChannels; c++) { - out[n * numChannels + c] = exponentials[c] / sum; + out[n * numChannels + c]; + out.Set(exponentials[c] / sum); } } } diff --git a/src/backends/reference/workloads/Softmax.hpp b/src/backends/reference/workloads/Softmax.hpp index 3b974f9e9e..3876293957 100644 --- a/src/backends/reference/workloads/Softmax.hpp +++ b/src/backends/reference/workloads/Softmax.hpp @@ -5,12 +5,13 @@ #pragma once +#include "BaseIterator.hpp" #include <armnn/Tensor.hpp> namespace armnn { /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo. -void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta); +void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta); } //namespace armnn |