diff options
author | narpra01 <narumol.prangnawarat@arm.com> | 2018-09-28 11:07:51 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:57 +0100 |
commit | 1e4c31dafb1c8984a126fa1d211ed8f9eedaf7cc (patch) | |
tree | 006e40b3bbfdc4a202cdada8fa9afec0dd8fffae /src/backends/reference | |
parent | 33cea4db0b2729c5dbd50f9c0985578c60baffdd (diff) | |
download | armnn-1e4c31dafb1c8984a126fa1d211ed8f9eedaf7cc.tar.gz |
IVGCVSW-1812 Adding Ref implementation and tests of MeanWorkloads
Change-Id: I6fb15c407024e3b91d5abf4513f8090be5821760
Diffstat (limited to 'src/backends/reference')
-rw-r--r-- | src/backends/reference/RefLayerSupport.cpp | 7 | ||||
-rw-r--r-- | src/backends/reference/RefWorkloadFactory.cpp | 2 | ||||
-rw-r--r-- | src/backends/reference/backend.mk | 3 | ||||
-rw-r--r-- | src/backends/reference/workloads/CMakeLists.txt | 6 | ||||
-rw-r--r-- | src/backends/reference/workloads/Mean.cpp | 136 | ||||
-rw-r--r-- | src/backends/reference/workloads/Mean.hpp | 21 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefMeanFloat32Workload.cpp | 35 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefMeanFloat32Workload.hpp | 22 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefMeanUint8Workload.cpp | 39 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefMeanUint8Workload.hpp | 21 | ||||
-rw-r--r-- | src/backends/reference/workloads/RefWorkloads.hpp | 2 |
11 files changed, 292 insertions, 2 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index d56cdebeda..12a2817774 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -392,7 +392,12 @@ bool IsMeanSupportedRef(const TensorInfo& input, const MeanDescriptor& descriptor, std::string* reasonIfUnsupported) { - return false; + ignore_unused(output); + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); } } diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 5cefd1b6e1..582c691a18 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -242,7 +242,7 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction( std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean( const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info); + return MakeWorkload<RefMeanFloat32Workload, RefMeanUint8Workload>(descriptor, info); } std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor, diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 23dab119d0..e5345c07d5 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -15,6 +15,7 @@ BACKEND_SOURCES := \ workloads/Broadcast.cpp \ workloads/ConvImpl.cpp \ workloads/FullyConnected.cpp \ + workloads/Mean.cpp \ workloads/Pooling2d.cpp \ workloads/RefActivationFloat32Workload.cpp \ workloads/RefActivationUint8Workload.cpp \ @@ -36,6 +37,8 @@ BACKEND_SOURCES := \ workloads/RefFullyConnectedUint8Workload.cpp \ workloads/RefL2NormalizationFloat32Workload.cpp \ workloads/RefLstmFloat32Workload.cpp \ + workloads/RefMeanFloat32Workload.cpp \ + 
workloads/RefMeanUint8Workload.cpp \ workloads/RefMergerFloat32Workload.cpp \ workloads/RefMergerUint8Workload.cpp \ workloads/RefNormalizationFloat32Workload.cpp \ diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 7343b70daf..5a756e4596 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -94,6 +94,12 @@ list(APPEND armnnRefBackendWorkloads_sources Softmax.hpp Splitter.hpp TensorBufferArrayView.hpp + Mean.cpp + Mean.hpp + RefMeanFloat32Workload.cpp + RefMeanFloat32Workload.hpp + RefMeanUint8Workload.cpp + RefMeanUint8Workload.hpp ) add_library(armnnRefBackendWorkloads STATIC ${armnnRefBackendWorkloads_sources}) diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp new file mode 100644 index 0000000000..0db67a0eed --- /dev/null +++ b/src/backends/reference/workloads/Mean.cpp @@ -0,0 +1,136 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "Mean.hpp" +#include "backends/WorkloadData.hpp" + +#include <boost/numeric/conversion/cast.hpp> + +#include <cmath> +#include <cstddef> +#include <functional> +#include <limits> + +namespace armnn +{ +bool NextIndex(const unsigned int numDims, const armnn::TensorShape& dims, std::vector<unsigned int>& current) +{ + unsigned int carry = 1; + + for (unsigned int idx = numDims; idx-- > 0; ) + { + unsigned int current_val = current[idx] + carry; + if (dims[idx] == current_val) + { + current[idx] = 0; + } + else + { + current[idx] = current_val; + carry = 0; + break; + } + } + return (carry == 0); +} + +std::size_t ReducedOutputOffset(const unsigned int numDims, const armnn::TensorShape& dims, + std::vector<unsigned int>& index, const unsigned int numAxis, + const std::vector<unsigned int>& axis) { + std::size_t offset = 0; + for (unsigned int idx = 0; idx < numDims; ++idx) + { + bool isAxis = false; + if (!axis.empty()) + { + for (unsigned int axisIdx = 0; axisIdx < numAxis; ++axisIdx) + { + if (idx == axis[axisIdx]) + { + isAxis = true; + break; + } + } + } + if (!isAxis) + { + offset = offset * boost::numeric_cast<size_t>(dims[idx]) + boost::numeric_cast<size_t>(index[idx]); + } + } + return offset; +} +} // namespace + +namespace armnn +{ +void Mean(const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputInfo, + const std::vector<unsigned int>& axis, + const float* inputData, + float* outputData) { + + unsigned int inputNumDims = inputInfo.GetNumDimensions(); + unsigned int outputNumDims = outputInfo.GetNumDimensions(); + + armnn::TensorShape outputDims = outputInfo.GetShape(); + armnn::TensorShape inputDims = inputInfo.GetShape(); + + // Initialise output data. 
+ size_t numOutputs = 1; + for (unsigned int idx = 0; idx < outputNumDims; ++idx) + { + numOutputs *= boost::numeric_cast<size_t>(outputDims[idx]); + } + + std::vector<float> tempSum(numOutputs); + for (size_t idx = 0; idx < numOutputs; ++idx) + { + outputData[idx] = 0.0f; + tempSum[idx] = 0.0f; + } + + // Initialise temp index. + std::vector<unsigned int> tempIndex(inputNumDims); + for (unsigned int idx = 0; idx < inputNumDims; ++idx) + { + tempIndex[idx] = 0; + } + + std::vector<unsigned int> resolvedAxis = axis; + if (resolvedAxis.empty()) + { + for (unsigned int idx = 0; idx < inputNumDims; ++idx) + { + resolvedAxis.push_back(idx); + } + } + unsigned int numResolvedAxis = boost::numeric_cast<unsigned int>(resolvedAxis.size()); + + // Iterates through input_data and sum up the reduced axis. + for (bool hasNext = true; hasNext; hasNext = NextIndex(inputNumDims, inputDims, tempIndex)) + { + size_t inputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex, 0, {}); + size_t outputOffset = ReducedOutputOffset(inputNumDims, inputDims, tempIndex, + numResolvedAxis, resolvedAxis); + tempSum[outputOffset] += inputData[inputOffset]; + } + + // Takes average by num of elements added to get mean. 
+ size_t numElementsInAxis = 1; + for (unsigned int idx = 0; idx < numResolvedAxis; ++idx) + { + size_t current = boost::numeric_cast<size_t>(inputDims[resolvedAxis[idx]]); + BOOST_ASSERT(boost::numeric_cast<float>(current) < + (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis))); + numElementsInAxis *= current; + } + if (numElementsInAxis > 0) { + for (size_t idx = 0; idx < numOutputs; ++idx) + { + outputData[idx] = tempSum[idx] / boost::numeric_cast<float>(numElementsInAxis); + } + } +} +} //namespace armnn diff --git a/src/backends/reference/workloads/Mean.hpp b/src/backends/reference/workloads/Mean.hpp new file mode 100644 index 0000000000..38c2e39653 --- /dev/null +++ b/src/backends/reference/workloads/Mean.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "armnn/DescriptorsFwd.hpp" +#include "armnn/Tensor.hpp" + +#include <vector> + +namespace armnn +{ +void Mean(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const std::vector<unsigned int>& axis, + const float* inputData, + float* outputData); +} //namespace armnn + diff --git a/src/backends/reference/workloads/RefMeanFloat32Workload.cpp b/src/backends/reference/workloads/RefMeanFloat32Workload.cpp new file mode 100644 index 0000000000..a23906b8aa --- /dev/null +++ b/src/backends/reference/workloads/RefMeanFloat32Workload.cpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefMeanFloat32Workload.hpp" + +#include "Mean.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" +#include "vector" + +namespace armnn +{ + +RefMeanFloat32Workload::RefMeanFloat32Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) + :Float32Workload<MeanQueueDescriptor>(descriptor, info) {} + + +void RefMeanFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + const float* inputData = GetInputTensorDataFloat(0, m_Data); + float* outputData = GetOutputTensorDataFloat(0, m_Data); + + Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, inputData, outputData); +} + +} //namespace armnn + + diff --git a/src/backends/reference/workloads/RefMeanFloat32Workload.hpp b/src/backends/reference/workloads/RefMeanFloat32Workload.hpp new file mode 100644 index 0000000000..a4c559f0c6 --- /dev/null +++ b/src/backends/reference/workloads/RefMeanFloat32Workload.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + + +class RefMeanFloat32Workload : public Float32Workload<MeanQueueDescriptor> +{ +public: + explicit RefMeanFloat32Workload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +}//namespace armnn diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.cpp b/src/backends/reference/workloads/RefMeanUint8Workload.cpp new file mode 100644 index 0000000000..4ebffcfd70 --- /dev/null +++ b/src/backends/reference/workloads/RefMeanUint8Workload.cpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefMeanUint8Workload.hpp" + +#include "Mean.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +RefMeanUint8Workload::RefMeanUint8Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) + :Uint8Workload<MeanQueueDescriptor>(descriptor, info) {} + + +void RefMeanUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanUint8Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo); + + std::vector<float> results(outputInfo.GetNumElements()); + + Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, dequant.data(), results.data()); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn + diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.hpp b/src/backends/reference/workloads/RefMeanUint8Workload.hpp new file mode 100644 index 0000000000..21cf72b38f --- /dev/null +++ b/src/backends/reference/workloads/RefMeanUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefMeanUint8Workload : public Uint8Workload<MeanQueueDescriptor> +{ +public: + explicit RefMeanUint8Workload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index e5c6e1e9d5..7e89cabd66 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -51,3 +51,5 @@ #include "RefLstmFloat32Workload.hpp" #include "RefConvertFp16ToFp32Workload.hpp" #include "RefConvertFp32ToFp16Workload.hpp" +#include "RefMeanUint8Workload.hpp" +#include "RefMeanFloat32Workload.hpp"
\ No newline at end of file