ArmNN
 20.02
RefQuantizeWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include "RefWorkloadUtils.hpp"
9 
10 #include <armnn/TypesUtils.hpp>
11 
12 
13 namespace armnn
14 {
15 
16 namespace
17 {
18 
// Element-wise quantisation helper: copies numValues values from the float
// decoder 'in' into the encoder 'out', which stores each value in the output
// tensor's (quantised) data type.
void QuantizeImpl(Decoder<float>& in, Encoder<float>& out, size_t numValues)
{
    for (unsigned int i = 0; i < numValues; i++)
    {
        // NOTE(review): the next two statements are NOT dead code. operator[]
        // on the decoder/encoder repositions the iterator to element i as a
        // side effect and is declared on the abstract base, so its result
        // cannot be chained with Get()/Set() here — confirm against
        // BaseIterator.hpp. The Get()/Set() below then operate at position i.
        in[i];
        out[i];
        out.Set(in.Get());
    }
}
28 
29 } //namespace
30 
    : BaseWorkload(descriptor, info)
    // Cache the input tensor's element count once at construction; Execute()
    // passes it to the quantise loop on every invocation.
    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
{
}
36 
{
    // Build the typed decoder/encoder pair once tensor handles exist: the
    // decoder reads the input tensor as float, the encoder writes the output
    // tensor in whatever (quantised) data type its TensorInfo specifies.
    const TensorInfo& inputInfo = armnn::GetTensorInfo(m_Data.m_Inputs[0]);
    m_InputDecoder = MakeDecoder<float>(inputInfo);

    const TensorInfo& outputInfo = armnn::GetTensorInfo(m_Data.m_Outputs[0]);
    m_OutputEncoder = MakeEncoder<float>(outputInfo);
}
45 
{
    // Re-point the iterators at the tensors' mapped memory for this run —
    // handles may be (re)mapped between invocations, so Reset() every time.
    // NOTE(review): Map() has no matching Unmap() here; presumably the
    // ref-backend handles tolerate this — verify against ITensorHandle usage.
    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());

    // Quantise every input element into the output tensor.
    QuantizeImpl(*m_InputDecoder, *m_OutputEncoder, m_NumElements);
}
53 
54 } //namespace armnn
const QuantizeQueueDescriptor m_Data
Definition: Workload.hpp:46
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Copyright (c) 2020 ARM Limited.
void Execute() const override
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
RefQuantizeWorkload(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Inputs