diff options
author | David Beck <david.beck@arm.com> | 2018-09-06 16:46:34 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-09-25 14:54:29 +0100 |
commit | f195f03e095a5c4dc6880be11af64cab83b5c94b (patch) | |
tree | d1c6d7d46ed70b915772bd50c5074d13443d9bca /src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp | |
parent | c2044fe9d26a8b6afca48aee04bd5d29f8e27b8d (diff) | |
download | armnn-f195f03e095a5c4dc6880be11af64cab83b5c94b.tar.gz |
IVGCVSW-1803 : add Ref Subtraction layer
Change-Id: I4c019d626f9369245eca6d549bbe7a28e141f198
Diffstat (limited to 'src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp')
-rw-r--r-- | src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp | 41 |
1 file changed, 41 insertions, 0 deletions
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp new file mode 100644 index 0000000000..8066762e48 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp @@ -0,0 +1,41 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "RefSubtractionUint8Workload.hpp" + +#include "Subtraction.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefSubtractionUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSubtractionUint8Workload_Execute"); + + const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0); + auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1); + + std::vector<float> results(outputInfo.GetNumElements()); + + Subtraction(inputInfo0.GetShape(), + inputInfo1.GetShape(), + outputInfo.GetShape(), + dequant0.data(), + dequant1.data(), + results.data()); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn |