23.02
RefDequantizeWorkload.cpp
Go to the documentation of this file.
1
//
2
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3
// SPDX-License-Identifier: MIT
4
//
5
6
#include "
RefDequantizeWorkload.hpp
"
7
#include "
RefWorkloadUtils.hpp
"
8
#include "
Encoders.hpp
"
9
#include "
Decoders.hpp
"
10
#include "
Dequantize.hpp
"
11
12
namespace
armnn
13
{
14
15
void
RefDequantizeWorkload::Execute
()
const
16
{
17
Execute
(
m_Data
.
m_Inputs
,
m_Data
.
m_Outputs
);
18
}
19
20
// Asynchronous entry point: the caller supplies per-invocation tensor handles
// via ExecutionData instead of the workload's own descriptor.
void RefDequantizeWorkload::ExecuteAsync(ExecutionData& executionData)
{
    // executionData.m_Data is a type-erased void*; for the reference backend it
    // is known to carry a WorkingMemDescriptor.
    auto* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
25
26
// Shared implementation for both the sync and async entry points.
// Dequantizes inputs[0] (quantized tensor) into outputs[0] (float tensor),
// using decoder/encoder abstractions so any supported quantized data type
// is handled uniformly.
void RefDequantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");

    const TensorInfo& srcInfo = GetTensorInfo(inputs[0]);
    const TensorInfo& dstInfo = GetTensorInfo(outputs[0]);

    // Map the tensor memory and wrap it: the decoder reads quantized values as
    // float, the encoder writes float results to the output buffer.
    auto decoder = MakeDecoder<float>(srcInfo, inputs[0]->Map());
    auto encoder = MakeEncoder<float>(dstInfo, outputs[0]->Map());

    Dequantize(*decoder, *encoder, srcInfo, dstInfo);
}
38
39
}
// namespace armnn
armnn::GetTensorInfo
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Definition:
RefWorkloadUtils.hpp:27
armnn::LayerType::Map
@ Map
armnn::experimental::WorkingMemDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition:
WorkingMemDescriptor.hpp:20
armnn::experimental::ExecutionData
Definition:
ExecutionData.hpp:14
RefDequantizeWorkload.hpp
armnn::experimental::ExecutionData::m_Data
void * m_Data
Definition:
ExecutionData.hpp:16
armnn::experimental::WorkingMemDescriptor
Definition:
WorkingMemDescriptor.hpp:18
armnn::BaseWorkload< DequantizeQueueDescriptor >::m_Data
DequantizeQueueDescriptor m_Data
Definition:
Workload.hpp:83
Dequantize.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition:
01_00_quick_start.dox:6
RefWorkloadUtils.hpp
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition:
Profiling.hpp:220
Encoders.hpp
armnn::TensorInfo
Definition:
Tensor.hpp:152
armnn::RefDequantizeWorkload::ExecuteAsync
void ExecuteAsync(ExecutionData &executionData) override
Definition:
RefDequantizeWorkload.cpp:20
armnn::RefDequantizeWorkload::Execute
void Execute() const override
Definition:
RefDequantizeWorkload.cpp:15
Decoders.hpp
armnn::experimental::WorkingMemDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition:
WorkingMemDescriptor.hpp:21
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition:
WorkloadData.hpp:27
armnn::Dequantize
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
Definition:
TypesUtils.cpp:46
armnn::Compute::CpuRef
@ CpuRef
CPU Execution: Reference C++ kernels.
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition:
WorkloadData.hpp:26
src
backends
reference
workloads
RefDequantizeWorkload.cpp
Generated on Wed Mar 22 2023 15:53:02 for ArmNN by
1.8.17