diff options
Diffstat (limited to 'src/backends/reference/workloads')
8 files changed, 282 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 7343b70daf..5a756e4596 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -94,6 +94,12 @@ list(APPEND armnnRefBackendWorkloads_sources Softmax.hpp Splitter.hpp TensorBufferArrayView.hpp + Mean.cpp + Mean.hpp + RefMeanFloat32Workload.cpp + RefMeanFloat32Workload.hpp + RefMeanUint8Workload.cpp + RefMeanUint8Workload.hpp ) add_library(armnnRefBackendWorkloads STATIC ${armnnRefBackendWorkloads_sources}) diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp new file mode 100644 index 0000000000..0db67a0eed --- /dev/null +++ b/src/backends/reference/workloads/Mean.cpp @@ -0,0 +1,136 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "Mean.hpp" +#include "backends/WorkloadData.hpp" + +#include <boost/numeric/conversion/cast.hpp> + +#include <cmath> +#include <cstddef> +#include <functional> +#include <limits> + +namespace armnn +{ +bool NextIndex(const unsigned int numDims, const armnn::TensorShape& dims, std::vector<unsigned int>& current) +{ + unsigned int carry = 1; + + for (unsigned int idx = numDims; idx-- > 0; ) + { + unsigned int current_val = current[idx] + carry; + if (dims[idx] == current_val) + { + current[idx] = 0; + } + else + { + current[idx] = current_val; + carry = 0; + break; + } + } + return (carry == 0); +} + +std::size_t ReducedOutputOffset(const unsigned int numDims, const armnn::TensorShape& dims, + std::vector<unsigned int>& index, const unsigned int numAxis, + const std::vector<unsigned int>& axis) { + std::size_t offset = 0; + for (unsigned int idx = 0; idx < numDims; ++idx) + { + bool isAxis = false; + if (!axis.empty()) + { + for (unsigned int axisIdx = 0; axisIdx < numAxis; ++axisIdx) + { + if (idx == axis[axisIdx]) + { + isAxis = true; + break; + } + } + } + if 
(!isAxis) + { + offset = offset * boost::numeric_cast<size_t>(dims[idx]) + boost::numeric_cast<size_t>(index[idx]); + } + } + return offset; +} +} // namespace + +namespace armnn +{ +void Mean(const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputInfo, + const std::vector<unsigned int>& axis, + const float* inputData, + float* outputData) { + + unsigned int inputNumDims = inputInfo.GetNumDimensions(); + unsigned int outputNumDims = outputInfo.GetNumDimensions(); + + armnn::TensorShape outputDims = outputInfo.GetShape(); + armnn::TensorShape inputDims = inputInfo.GetShape(); + + // Initialise output data. + size_t numOutputs = 1; + for (unsigned int idx = 0; idx < outputNumDims; ++idx) + { + numOutputs *= boost::numeric_cast<size_t>(outputDims[idx]); + } + + std::vector<float> tempSum(numOutputs); + for (size_t idx = 0; idx < numOutputs; ++idx) + { + outputData[idx] = 0.0f; + tempSum[idx] = 0.0f; + } + + // Initialise temp index. + std::vector<unsigned int> tempIndex(inputNumDims); + for (unsigned int idx = 0; idx < inputNumDims; ++idx) + { + tempIndex[idx] = 0; + } + + std::vector<unsigned int> resolvedAxis = axis; + if (resolvedAxis.empty()) + { + for (unsigned int idx = 0; idx < inputNumDims; ++idx) + { + resolvedAxis.push_back(idx); + } + } + unsigned int numResolvedAxis = boost::numeric_cast<unsigned int>(resolvedAxis.size()); + + // Iterates through input_data and sum up the reduced axis. + for (bool hasNext = true; hasNext; hasNext = NextIndex(inputNumDims, inputDims, tempIndex)) + { + size_t inputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex, 0, {}); + size_t outputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex, + numResolvedAxis, resolvedAxis); + tempSum[outputOffset] += inputData[inputOffset]; + } + + // Takes average by num of elements added to get mean. 
+ size_t numElementsInAxis = 1; + for (unsigned int idx = 0; idx < numResolvedAxis; ++idx) + { + size_t current = boost::numeric_cast<size_t>(inputDims[resolvedAxis[idx]]); + BOOST_ASSERT(boost::numeric_cast<float>(current) < + (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis))); + numElementsInAxis *= current; + } + if (numElementsInAxis > 0) { + for (size_t idx = 0; idx < numOutputs; ++idx) + { + outputData[idx] = tempSum[idx] / boost::numeric_cast<float>(numElementsInAxis); + } + } +} +} //namespace armnn diff --git a/src/backends/reference/workloads/Mean.hpp b/src/backends/reference/workloads/Mean.hpp new file mode 100644 index 0000000000..38c2e39653 --- /dev/null +++ b/src/backends/reference/workloads/Mean.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "armnn/DescriptorsFwd.hpp" +#include "armnn/Tensor.hpp" + +#include <vector> + +namespace armnn +{ +void Mean(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const std::vector<unsigned int>& axis, + const float* inputData, + float* outputData); +} //namespace armnn + diff --git a/src/backends/reference/workloads/RefMeanFloat32Workload.cpp b/src/backends/reference/workloads/RefMeanFloat32Workload.cpp new file mode 100644 index 0000000000..a23906b8aa --- /dev/null +++ b/src/backends/reference/workloads/RefMeanFloat32Workload.cpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefMeanFloat32Workload.hpp" + +#include "Mean.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" +#include <vector> + +namespace armnn +{ + +RefMeanFloat32Workload::RefMeanFloat32Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) + :Float32Workload<MeanQueueDescriptor>(descriptor, info) {} + + +void RefMeanFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + const float* inputData = GetInputTensorDataFloat(0, m_Data); + float* outputData = GetOutputTensorDataFloat(0, m_Data); + + Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, inputData, outputData); +} + +} //namespace armnn + + diff --git a/src/backends/reference/workloads/RefMeanFloat32Workload.hpp b/src/backends/reference/workloads/RefMeanFloat32Workload.hpp new file mode 100644 index 0000000000..a4c559f0c6 --- /dev/null +++ b/src/backends/reference/workloads/RefMeanFloat32Workload.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + + +class RefMeanFloat32Workload : public Float32Workload<MeanQueueDescriptor> +{ +public: + explicit RefMeanFloat32Workload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +}//namespace armnn diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.cpp b/src/backends/reference/workloads/RefMeanUint8Workload.cpp new file mode 100644 index 0000000000..4ebffcfd70 --- /dev/null +++ b/src/backends/reference/workloads/RefMeanUint8Workload.cpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefMeanUint8Workload.hpp" + +#include "Mean.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +RefMeanUint8Workload::RefMeanUint8Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) + :Uint8Workload<MeanQueueDescriptor>(descriptor, info) {} + + +void RefMeanUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanUint8Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo); + + std::vector<float> results(outputInfo.GetNumElements()); + + Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, dequant.data(), results.data()); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn + diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.hpp b/src/backends/reference/workloads/RefMeanUint8Workload.hpp new file mode 100644 index 0000000000..21cf72b38f --- /dev/null +++ b/src/backends/reference/workloads/RefMeanUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefMeanUint8Workload : public Uint8Workload<MeanQueueDescriptor> +{ +public: + explicit RefMeanUint8Workload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index e5c6e1e9d5..7e89cabd66 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -51,3 +51,5 @@ #include "RefLstmFloat32Workload.hpp" #include "RefConvertFp16ToFp32Workload.hpp" #include "RefConvertFp32ToFp16Workload.hpp" +#include "RefMeanUint8Workload.hpp" +#include "RefMeanFloat32Workload.hpp"
\ No newline at end of file