diff options
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r-- | src/backends/reference/workloads/CMakeLists.txt | 4 | ||||
-rw-r--r-- | src/backends/reference/workloads/Pad.cpp | 158 | ||||
-rw-r--r-- | src/backends/reference/workloads/Pad.hpp | 20 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefPadWorkload.cpp | 37 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefPadWorkload.hpp | 21 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefWorkloads.hpp | 3 |
6 files changed, 242 insertions, 1 deletion
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index be71a85047..bf65639c0d 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -16,6 +16,8 @@ list(APPEND armnnRefBackendWorkloads_sources FullyConnected.cpp FullyConnected.hpp Merger.hpp + Pad.cpp + Pad.hpp Pooling2d.cpp Pooling2d.hpp RefActivationFloat32Workload.cpp @@ -64,6 +66,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefMergerUint8Workload.hpp RefNormalizationFloat32Workload.cpp RefNormalizationFloat32Workload.hpp + RefPadWorkload.cpp + RefPadWorkload.hpp RefPermuteWorkload.cpp RefPermuteWorkload.hpp RefPooling2dFloat32Workload.cpp diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp new file mode 100644 index 0000000000..5c859317dd --- /dev/null +++ b/src/backends/reference/workloads/Pad.cpp @@ -0,0 +1,158 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "Pad.hpp" +#include "backends/WorkloadData.hpp" + +#include <boost/numeric/conversion/cast.hpp> +#include "TensorBufferArrayView.hpp" + +#include <cmath> +#include <cstddef> +#include <functional> +#include <limits> +#include <cassert> + + +namespace armnn +{ +void Pad(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + std::vector<std::pair<unsigned int, unsigned int>> m_PadList, + const float* inputData, + float* outData) +{ + unsigned int numOutputElements = outputInfo.GetNumElements(); + + TensorShape outputShape = outputInfo.GetShape(); + TensorShape inputShape = inputInfo.GetShape(); + + unsigned int numInputDimensions = inputShape.GetNumDimensions(); + #ifndef NDEBUG + unsigned int numOutputDimensions = outputShape.GetNumDimensions(); + + assert(numInputDimensions == numOutputDimensions); + #endif + + unsigned int inputBatches = 0; + unsigned int inputChannels = 0; + unsigned int inputHeight = 0; + unsigned int inputWidth = 0; + + unsigned int outputChannels = 0; + unsigned int outputHeight = 0; + unsigned int outputWidth = 0; + + for (unsigned int i = 0; i < numOutputElements; ++i) + { + outData[i] = 0; + } + + switch(numInputDimensions) { + case 1: + + inputWidth = inputShape[0]; + + for (unsigned int w = 0; w < inputWidth ; w++) + { + + outData[w+std::get<0>(m_PadList[0])] = inputData[w]; + + } + + break; + case 2 : + + inputHeight = inputShape[0]; + inputWidth = inputShape[1]; + + outputHeight = outputShape[0]; + outputWidth = outputShape[1]; + + for (unsigned int h = 0; h < inputHeight; h++) + { + + for (unsigned int w = 0; w < inputWidth ; w++) + { + outData[(h+std::get<0>(m_PadList[0]))*outputWidth + + (w+std::get<0>(m_PadList[1]))] = inputData[h * inputWidth + w]; + } + } + + break; + case 3 : + + inputChannels = inputShape[0]; + inputHeight = inputShape[1]; + inputWidth = inputShape[2]; + + outputChannels = outputShape[0]; + outputHeight = outputShape[1]; + outputWidth = outputShape[2]; + + 
for (unsigned int c = 0; c < inputChannels; c++) + { + + for (unsigned int h = 0; h < inputHeight; h++) + { + + for (unsigned int w = 0; w < inputWidth ; w++) + { + + outData[(c+std::get<0>(m_PadList[0]))*outputHeight*outputWidth + + (h+std::get<0>(m_PadList[1]))*outputWidth + + (w+std::get<0>(m_PadList[2]))] = inputData[c * inputHeight * inputWidth + + h * inputWidth + + w]; + } + } + } + + break; + case 4 : + + inputBatches = inputShape[0]; + inputChannels = inputShape[1]; + inputHeight = inputShape[2]; + inputWidth = inputShape[3]; + + outputChannels = outputShape[1]; + outputHeight = outputShape[2]; + outputWidth = outputShape[3]; + + for (unsigned int b = 0; b < inputBatches; b++) + { + for (unsigned int c = 0; c < inputChannels; c++) + { + + for (unsigned int h = 0; h < inputHeight; h++) + { + + for (unsigned int w = 0; w < inputWidth ; w++) + { + + outData[(b+std::get<0>(m_PadList[0])) * outputChannels * outputHeight * outputWidth + + (c+std::get<0>(m_PadList[1])) * outputHeight * outputWidth + + (h+std::get<0>(m_PadList[2])) * outputWidth + + (w+std::get<0>(m_PadList[3]))] = inputData[b * inputChannels * inputHeight + * inputWidth + + c * inputHeight * inputWidth + + h * inputWidth + + w]; + + } + } + } + } + + break; + + default : + break; + } + +} + +} //namespace armnn diff --git a/src/backends/reference/workloads/Pad.hpp b/src/backends/reference/workloads/Pad.hpp new file mode 100644 index 0000000000..ed80ef8eb0 --- /dev/null +++ b/src/backends/reference/workloads/Pad.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "armnn/DescriptorsFwd.hpp" +#include "armnn/Tensor.hpp" + +#include <vector> + +namespace armnn +{ +void Pad(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + std::vector<std::pair<unsigned int, unsigned int>> m_PadList, + const float* inputData, + float* outData); +} //namespace armnn diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp new file mode 100644 index 0000000000..233fbe4f34 --- /dev/null +++ b/src/backends/reference/workloads/RefPadWorkload.cpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "RefPadWorkload.hpp" + +#include "Pad.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +RefPadWorkload::RefPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info) + :BaseWorkload<PadQueueDescriptor>(descriptor, info) {} + + +void RefPadWorkload::Execute() const +{ + + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPadWorkload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + const float* inputData = GetInputTensorDataFloat(0, m_Data); + float* outputData = GetOutputTensorDataFloat(0, m_Data); + + + Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData); +} + +} //namespace armnn
\ No newline at end of file diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp new file mode 100644 index 0000000000..7ff117d6a5 --- /dev/null +++ b/src/backends/reference/workloads/RefPadWorkload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefPadWorkload : public BaseWorkload<PadQueueDescriptor> +{ +public: + explicit RefPadWorkload (const PadQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 7e89cabd66..14e6699a73 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -52,4 +52,5 @@ #include "RefConvertFp16ToFp32Workload.hpp" #include "RefConvertFp32ToFp16Workload.hpp" #include "RefMeanUint8Workload.hpp" -#include "RefMeanFloat32Workload.hpp"
\ No newline at end of file +#include "RefMeanFloat32Workload.hpp" +#include "RefPadWorkload.hpp"
\ No newline at end of file |