diff options
author:    Matthew Jackson <matthew.jackson@arm.com>  2019-07-11 12:07:09 +0100
committer: Áron Virginás-Tar <aron.virginas-tar@arm.com>  2019-07-17 14:31:02 +0000
commit:    81e601c5a5ebf3de3dd6418942708158de50252a (patch)
tree:      48307f6d49639d7bc9bfa2db96a2de33d1095861 /src/backends/reference/workloads
parent:    01bfd1781a18508577b9135408465ee76f346ae5 (diff)
download:  armnn-81e601c5a5ebf3de3dd6418942708158de50252a.tar.gz
IVGCVSW-3419 Add reference workload support for the new Stack layer
* Added reference workload for the Stack layer
* Added factory methods
* Added validation support
* Added unit tests
Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Ib14b72c15f53a2a2ca152afc357ce2aa405ccc88
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt        |   4
-rw-r--r--  src/backends/reference/workloads/RefStackWorkload.cpp  |  57
-rw-r--r--  src/backends/reference/workloads/RefStackWorkload.hpp  |  22
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp      |   1
-rw-r--r--  src/backends/reference/workloads/Stack.cpp             | 115
-rw-r--r--  src/backends/reference/workloads/Stack.hpp             |  20
6 files changed, 219 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 696605d662..c9db057be5 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -109,6 +109,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefSpaceToDepthWorkload.hpp RefSplitterWorkload.cpp RefSplitterWorkload.hpp + RefStackWorkload.cpp + RefStackWorkload.hpp RefStridedSliceWorkload.cpp RefStridedSliceWorkload.hpp RefTransposeConvolution2dWorkload.cpp @@ -127,6 +129,8 @@ list(APPEND armnnRefBackendWorkloads_sources SpaceToDepth.cpp Splitter.hpp Splitter.cpp + Stack.cpp + Stack.hpp StridedSlice.hpp StridedSlice.cpp StringMapping.cpp diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp new file mode 100644 index 0000000000..be36f40633 --- /dev/null +++ b/src/backends/reference/workloads/RefStackWorkload.cpp @@ -0,0 +1,57 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefStackWorkload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "Stack.hpp" + +#include <Profiling.hpp> + +namespace armnn +{ + +RefStackWorkload::RefStackWorkload(const StackQueueDescriptor& descriptor, + const WorkloadInfo& info) + : BaseWorkload(descriptor, info) +{} + +void RefStackWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStackWorkload_Execute"); + + // Can perform a simple concatenation when axis == 0 + if (!m_Data.m_Parameters.m_Axis) + { + float* output = GetOutputTensorData<float>(0, m_Data); + BOOST_ASSERT(output != nullptr); + + unsigned int numInputs = m_Data.m_Parameters.m_NumInputs; + unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements(); + + for (unsigned int inputIdx=0; inputIdx<numInputs; ++inputIdx) + { + const float* input = GetInputTensorData<float>(inputIdx, m_Data); + for (unsigned int elmt=0; elmt<inputLength; ++elmt) + { + output[(inputIdx * inputLength) + elmt] = input[elmt]; + } + } + return; + } + + std::vector<std::unique_ptr<Decoder<float>>> inputDecoders; + for (unsigned int i=0; i<m_Data.m_Inputs.size(); ++i) + { + inputDecoders.push_back(MakeDecoder<float>(GetTensorInfo(m_Data.m_Inputs[i]), + m_Data.m_Inputs[i]->Map())); + } + std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(m_Data.m_Outputs[0]), + m_Data.m_Outputs[0]->Map()); + + Stack(m_Data, inputDecoders, *outputEncoder); +} + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefStackWorkload.hpp b/src/backends/reference/workloads/RefStackWorkload.hpp new file mode 100644 index 0000000000..ceb27d9f60 --- /dev/null +++ b/src/backends/reference/workloads/RefStackWorkload.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <backendsCommon/Workload.hpp> +#include <backendsCommon/WorkloadData.hpp> + +namespace armnn +{ + +class RefStackWorkload : public BaseWorkload<StackQueueDescriptor> +{ +public: + explicit RefStackWorkload(const StackQueueDescriptor& descriptor, + const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 4bdf05daa8..e86dccd5bf 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -46,6 +46,7 @@ #include "RefSplitterWorkload.hpp" #include "RefSoftmaxWorkload.hpp" #include "RefSpaceToBatchNdWorkload.hpp" +#include "RefStackWorkload.hpp" #include "RefStridedSliceWorkload.hpp" #include "RefSpaceToDepthWorkload.hpp" #include "RefTransposeConvolution2dWorkload.hpp" diff --git a/src/backends/reference/workloads/Stack.cpp b/src/backends/reference/workloads/Stack.cpp new file mode 100644 index 0000000000..386c8992eb --- /dev/null +++ b/src/backends/reference/workloads/Stack.cpp @@ -0,0 +1,115 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "Stack.hpp" +#include "RefWorkloadUtils.hpp" + +namespace armnn +{ + +void Stack(const StackQueueDescriptor& data, + std::vector<std::unique_ptr<Decoder<float>>>& inputs, + Encoder<float>& output) +{ + const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]); + const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]); + + unsigned int outputNumDims = outputInfo.GetNumDimensions(); + unsigned int inputNumDims = inputInfo.GetNumDimensions(); + + const armnn::TensorShape& outputDims = outputInfo.GetShape(); + const armnn::TensorShape& inputDims = inputInfo.GetShape(); + + unsigned int axis = data.m_Parameters.m_Axis; + + // Initialise output data + unsigned int numOutputElements = 1; + for (unsigned int i=0; i<outputNumDims; ++i) + { + numOutputElements *= outputDims[i]; + } + + const unsigned int iNumTensors = static_cast<unsigned int>(data.m_Inputs.size()); + const unsigned int iBatchSize = inputDims[0]; + const unsigned int iChannels = (inputNumDims > 1) ? inputDims[1] : 1; + const unsigned int iHeight = (inputNumDims > 2) ? inputDims[2] : 1; + const unsigned int iWidth = (inputNumDims > 3) ? inputDims[3] : 1; + + const unsigned int oBatchSize = outputDims[1]; + const unsigned int oChannels = (outputNumDims > 2) ? outputDims[2] : 1; + const unsigned int oHeight = (outputNumDims > 3) ? outputDims[3] : 1; + const unsigned int oWidth = (outputNumDims > 4) ? 
outputDims[4] : 1; + + // Array to store the input coordinates + // iCoordinates[0] = i, iCoordinates[1] = bi, iCoordinates[2] = ci + // iCoordinates[3] = hi, iCoordinates[4] = wi, iCoordinates[5] = 0 + // iCoordinates[5] will be always zero and used for not incrementing + // the output when the input has less than 4 dimensions + std::array<unsigned int, 6> iCoordinates{ 0 }; + + // Array of pointers used to map the output coordinates to the input ones, in accordance with the axis + // This array is initialized with &iCoordinates[5] since this will be always zero + std::array<unsigned int *, 5> oCoordinates = { &iCoordinates[5], + &iCoordinates[5], + &iCoordinates[5], + &iCoordinates[5], + &iCoordinates[5] }; + + // Set the axis coordinate + oCoordinates[axis] = &iCoordinates[0]; + + // Map the output coordinates, accounting for the axis + unsigned int dim_shift = 0; + for(unsigned int dim = 0; dim < inputNumDims; ++dim) + { + if(dim == axis) + { + dim_shift++; + } + oCoordinates[dim + dim_shift] = &iCoordinates[dim + 1]; + } + + // Alias for the input coordinates + unsigned int &i = iCoordinates[0]; + unsigned int &bi = iCoordinates[1]; + unsigned int &ci = iCoordinates[2]; + unsigned int &hi = iCoordinates[3]; + unsigned int &wi = iCoordinates[4]; + + // Alias for the output coordinates + unsigned int &o = *(oCoordinates[0]); + unsigned int &bo = *(oCoordinates[1]); + unsigned int &co = *(oCoordinates[2]); + unsigned int &ho = *(oCoordinates[3]); + unsigned int &wo = *(oCoordinates[4]); + + // Stack tensors + for(; i < iNumTensors; ++(i)) + { + for(bi = 0; bi < iBatchSize; ++(bi)) + { + for(ci = 0; ci < iChannels; ++(ci)) + { + for(hi = 0; hi < iHeight; ++(hi)) + { + for(wi = 0; wi < iWidth; ++(wi)) + { + output[o * oWidth * oHeight * oChannels * oBatchSize + + bo * oWidth * oHeight * oChannels + + co * oWidth * oHeight + + ho * oWidth + + wo]; + + output.Set(inputs[i]->Get()); + + ++(*(inputs[i])); + } + } + } + } + } +} + +} // namespace armnn diff --git 
a/src/backends/reference/workloads/Stack.hpp b/src/backends/reference/workloads/Stack.hpp new file mode 100644 index 0000000000..cd86d41552 --- /dev/null +++ b/src/backends/reference/workloads/Stack.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "Encoders.hpp" +#include "Decoders.hpp" + +#include <backendsCommon/WorkloadData.hpp> + +namespace armnn +{ + +void Stack (const StackQueueDescriptor& data, + std::vector<std::unique_ptr<Decoder<float>>>& inputs, + Encoder<float>& output); + +} // namespace armnn