//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefQuantizeWorkload.hpp"

#include "RefWorkloadUtils.hpp"

#include <armnn/TypesUtils.hpp>

namespace armnn
{

namespace
{

void QuantizeImpl(Decoder<float>& in, Encoder<float>& out, size_t numValues)
{
    for (unsigned int i = 0; i < numValues; i++)
    {
        // operator[] repositions the decoder/encoder at element i; the
        // returned reference is deliberately discarded.
        in[i];
        out[i];
        // Decode the float value and re-encode it into the output tensor;
        // the encoder applies the output's quantization parameters on Set().
        out.Set(in.Get());
    }
}
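
// For reference, the per-element transform an integer-output encoder applies
// on Set() is the usual affine quantization (see armnn::Quantize in
// armnn/TypesUtils.hpp, included above):
//
//   quantized = clamp(round(value / scale) + offset, lowest, highest)
//
// where scale and offset come from the output TensorInfo.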
} //namespace

RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload(descriptor, info)
    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
{
}

void RefQuantizeWorkload::PostAllocationConfigure()
{
    // Create a float decoder for the input and a float encoder for the
    // output. The concrete iterator types are chosen from each tensor's
    // DataType, so the encoder is what actually quantizes on Set().
    const TensorInfo& inputInfo = armnn::GetTensorInfo(m_Data.m_Inputs[0]);
    m_InputDecoder = MakeDecoder<float>(inputInfo);

    const TensorInfo& outputInfo = armnn::GetTensorInfo(m_Data.m_Outputs[0]);
    m_OutputEncoder = MakeEncoder<float>(outputInfo);
}
void RefQuantizeWorkload::Execute() const
{
    // Map the tensor memory and point the decoder/encoder at it before
    // running the element-wise quantization loop.
    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());

    QuantizeImpl(*m_InputDecoder, *m_OutputEncoder, m_NumElements);
}

} //namespace armnn
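
// Usage sketch (not part of the original file). A minimal, hypothetical
// driver, assuming tensor handles (inputHandle/outputHandle) and a
// WorkloadInfo already set up by the reference backend; the names below are
// illustrative only:
//
//   QuantizeQueueDescriptor descriptor;
//   descriptor.m_Inputs.push_back(inputHandle);   // Float32 source
//   descriptor.m_Outputs.push_back(outputHandle); // quantized destination
//
//   RefQuantizeWorkload workload(descriptor, workloadInfo);
//   workload.PostAllocationConfigure(); // builds the decoder/encoder
//   workload.Execute();                 // quantizes element by element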