path: root/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#pragma once

#include "backends/CpuTensorHandle.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/TypesUtils.hpp>

#include <boost/polymorphic_cast.hpp>

#include <cstdint>
#include <vector>

namespace armnn
{

////////////////////////////////////////////
/// float32 helpers
////////////////////////////////////////////

inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
    const ConstCpuTensorHandle* cpuTensorHandle =
        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetTensorInfo();
}
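
// For illustration (assumed call site, not part of the original header): a
// workload would typically look up an input's shape and data type like this:
//
//     const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]);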

template <typename DataType>
inline const DataType* GetConstCpuData(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
    const ConstCpuTensorHandle* cpuTensorHandle =
        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetConstTensor<DataType>();
}

template <typename DataType>
inline DataType* GetCpuData(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetTensor<DataType>();
}

template <typename DataType, typename PayloadType>
const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return GetConstCpuData<DataType>(tensorHandle);
}

template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return GetCpuData<DataType>(tensorHandle);
}

template <typename PayloadType>
const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<float>(idx, data);
}

template <typename PayloadType>
float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<float>(idx, data);
}
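
// Illustrative sketch (assumed workload and payload names, not part of the
// original header): a float32 reference workload's Execute() would typically
// combine the helpers above along these lines:
//
//     void RefSomethingFloat32Workload::Execute() const
//     {
//         const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
//         const float* inputData = GetInputTensorDataFloat(0, m_Data);
//         float* outputData = GetOutputTensorDataFloat(0, m_Data);
//         // ... compute inputInfo.GetNumElements() values into outputData ...
//     }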

////////////////////////////////////////////
/// u8 helpers
////////////////////////////////////////////

inline const uint8_t* GetConstCpuU8Data(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
    const ConstCpuTensorHandle* cpuTensorHandle =
        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetConstTensor<uint8_t>();
}

inline uint8_t* GetCpuU8Data(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetTensor<uint8_t>();
}

template <typename PayloadType>
const uint8_t* GetInputTensorDataU8(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return GetConstCpuU8Data(tensorHandle);
}

template <typename PayloadType>
uint8_t* GetOutputTensorDataU8(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return GetCpuU8Data(tensorHandle);
}
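
// Illustrative sketch (assumed pattern, not part of the original header): a u8
// reference workload would typically dequantize its inputs, run the float32
// reference implementation, and quantize the result back using the helpers
// defined below:
//
//     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
//     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
//     std::vector<float> input = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
//     std::vector<float> output(outputInfo.GetNumElements());
//     // ... run the float32 reference kernel from input into output ...
//     Quantize(GetOutputTensorDataU8(0, m_Data), output.data(), outputInfo);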

template <typename T>
std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
{
    std::vector<float> ret(info.GetNumElements());
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
    return ret;
}
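
// For reference, armnn::Dequantize computes: real = scale * (quantized - offset).
// Worked example (assumed values): with a quantization scale of 0.1f and an
// offset of 128, the quantized value 150 maps to 0.1f * (150 - 128) = 2.2f.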

inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}
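
// For reference, armnn::Quantize<uint8_t> computes:
// quantized = clamp(round(real / scale) + offset, 0, 255).
// Worked example (assumed values): with a scale of 0.1f and an offset of 128,
// 2.2f maps to round(2.2f / 0.1f) + 128 = 150, inverting the example above.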

} // namespace armnn