diff options
author | David Beck <david.beck@arm.com> | 2018-09-24 13:18:27 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:57 +0100 |
commit | b4540bef0b0327683fe8e63f727c1212800dc2a9 (patch) | |
tree | e1ea8bb6ee981640a1c469ceb556ed648ffde411 /src/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp | |
parent | 2d9dd36fb6bc20b370701ab15463359b9db35f18 (diff) | |
download | armnn-b4540bef0b0327683fe8e63f727c1212800dc2a9.tar.gz |
IVGCVSW-1898 : Ref backend folder structure
* Reference backend is renamed to backends/reference as per
https://confluence.arm.com/display/MLENG/Pluggable+backends
Change-Id: I27a13c274eb60995dfb459e3c49c0e2f60bcd32c
Diffstat (limited to 'src/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp')
-rw-r--r-- | src/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp | 42 |
1 file changed, 0 insertions, 42 deletions
diff --git a/src/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp b/src/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp deleted file mode 100644 index 3e16f60b11..0000000000 --- a/src/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefFakeQuantizationFloat32Workload.hpp" - -#include "RefWorkloadUtils.hpp" - -#include "Profiling.hpp" - -#include <boost/numeric/conversion/cast.hpp> - -namespace armnn -{ - -void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max) -{ - float scale = (max - min) / 255.f; - int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min)); - - for (uint32_t i = 0; i < numElements; i++) - { - outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset)); - } - -} - -void RefFakeQuantizationFloat32Workload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute"); - - const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); - - const float* inputData = GetInputTensorDataFloat(0, m_Data); - float* outputData = GetOutputTensorDataFloat(0, m_Data); - FakeQuantization(inputData, outputData, inputInfo.GetNumElements(), - m_Data.m_Parameters.m_Min, - m_Data.m_Parameters.m_Max); -} - -} //namespace armnn |