ArmNN 22.11 - RefDequantizeWorkload.cpp
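RefDequantizeWorkload implements the Dequantize workload for ArmNN's reference backend (CpuRef), which executes layers with plain C++ kernels on the CPU. It converts a quantized input tensor into a Float32 output tensor.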
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefDequantizeWorkload.hpp"
#include "RefWorkloadUtils.hpp"
#include "Encoders.hpp"
#include "Decoders.hpp"
#include "Dequantize.hpp"

namespace armnn
{

void RefDequantizeWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefDequantizeWorkload::ExecuteAsync(ExecutionData& executionData)
{
    // The asynchronous path takes its tensor handles from the externally
    // owned working-memory descriptor rather than from the queue descriptor.
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefDequantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");

    const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    // The decoder reads the quantized input elements and converts them to
    // float using the input tensor's quantization parameters; the encoder
    // writes the resulting float values into the output tensor.
    auto inputDecoder  = MakeDecoder<float>(inputInfo, inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

    Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
}

} // namespace armnn
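For context, the Dequantize helper included from Dequantize.hpp iterates over every element of the tensor, reading each quantized value through the Decoder, which applies the tensor's scale and zero point, and writing the resulting float through the Encoder. The standalone sketch below illustrates the underlying per-element affine dequantization, real = scale * (quantized - zeroPoint). It is a simplified illustration rather than ArmNN's actual helper, and the names dequantizeElement and dequantizeTensor are hypothetical.

#include <cstdint>
#include <vector>

// Hypothetical illustration of affine dequantization for an asymmetric
// unsigned 8-bit tensor: real = scale * (quantized - zeroPoint).
float dequantizeElement(uint8_t quantized, float scale, int32_t zeroPoint)
{
    return scale * static_cast<float>(static_cast<int32_t>(quantized) - zeroPoint);
}

// Applies the conversion to every element, mirroring the element-wise
// loop that a decoder/encoder pair performs over the mapped tensors.
std::vector<float> dequantizeTensor(const std::vector<uint8_t>& input,
                                    float scale, int32_t zeroPoint)
{
    std::vector<float> output;
    output.reserve(input.size());
    for (uint8_t q : input)
    {
        output.push_back(dequantizeElement(q, scale, zeroPoint));
    }
    return output;
}

For example, with scale = 0.5f and zeroPoint = 128, the quantized value 130 maps to 0.5f * (130 - 128) = 1.0f.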