ArmNN 20.02
RefDequantizeWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefDequantizeWorkload.hpp"
#include "RefWorkloadUtils.hpp"
#include "Encoders.hpp"
#include "Decoders.hpp"
#include "Dequantize.hpp"

namespace armnn
{

void RefDequantizeWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");

    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    // Read the quantized input through a float decoder and write the
    // dequantized result through a float encoder.
    auto inputDecoder  = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());

    Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
}

} // namespace armnn
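
The work in Execute() is done by the float decoder: for quantized data types it applies the standard affine dequantization, real = scale * (quantized - offset), to each element, and the encoder stores the resulting float in the output tensor. Below is a minimal, self-contained sketch of that per-element arithmetic; the scale, offset, and input values are illustrative assumptions, not taken from this file.

#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-alone illustration of affine dequantization:
//   real = scale * (quantized - offset)
// This mirrors what the float Decoder computes for each element before
// the Encoder writes the result into the output tensor.
int main()
{
    const float   scale  = 0.5f; // assumed quantization scale
    const int32_t offset = 10;   // assumed quantization offset (zero point)

    const std::vector<uint8_t> quantized = { 0, 10, 20, 30 };
    std::vector<float> dequantized(quantized.size());

    for (size_t i = 0; i < quantized.size(); ++i)
    {
        dequantized[i] = scale * (static_cast<int32_t>(quantized[i]) - offset);
    }

    for (float value : dequantized)
    {
        std::printf("%.1f ", value); // prints: -5.0 0.0 5.0 10.0
    }
    std::printf("\n");
    return 0;
}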
Referenced declarations:
Compute::CpuRef (CPU Execution: Reference C++ kernels)
const DequantizeQueueDescriptor m_Data (Workload.hpp:46)
const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name) (Profiling.hpp:169)
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
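
In normal use this workload is not constructed directly; the CpuRef backend creates it when a network containing a Dequantize layer is optimized and loaded. The following is a rough end-to-end sketch against the 20.02 public API, assuming a 1x4 QAsymmU8 input with scale 0.5 and offset 10; the shapes, quantization parameters, and data are illustrative, not taken from this file.

#include <armnn/ArmNN.hpp>
#include <cstdint>
#include <vector>

int main()
{
    using namespace armnn;

    // Build a minimal network: Input -> Dequantize -> Output.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input      = network->AddInputLayer(0);
    IConnectableLayer* dequantize = network->AddDequantizeLayer("dequantize");
    IConnectableLayer* output     = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(dequantize->GetInputSlot(0));
    dequantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Quantized input, float output (assumed shapes and quantization params).
    TensorInfo inputInfo({ 1, 4 }, DataType::QAsymmU8, 0.5f, 10);
    TensorInfo outputInfo({ 1, 4 }, DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    dequantize->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Optimize for the reference backend and load the network into the runtime.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optimized = Optimize(*network, backends, runtime->GetDeviceSpec());

    NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optimized));

    // Run one inference; the Dequantize layer executes RefDequantizeWorkload.
    std::vector<uint8_t> inputData = { 0, 10, 20, 30 };
    std::vector<float>   outputData(4);

    InputTensors inputTensors
    {
        { 0, ConstTensor(runtime->GetInputTensorInfo(networkId, 0), inputData.data()) }
    };
    OutputTensors outputTensors
    {
        { 0, Tensor(runtime->GetOutputTensorInfo(networkId, 0), outputData.data()) }
    };
    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);

    // outputData now holds { -5.0f, 0.0f, 5.0f, 10.0f }.
    return 0;
}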