ArmNN 22.02 — source listing of Encoders.hpp (extracted from the Doxygen "documentation of this file" page).
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#pragma once

#include "BaseIterator.hpp"

#include <armnnUtils/TensorUtils.hpp>

#include <armnn/utility/Assert.hpp>
14 namespace armnn
15 {
16 
17 template<typename T>
18 inline std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data = nullptr);
19 
20 template<>
21 inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* data)
22 {
23  switch(info.GetDataType())
24  {
26  {
27  return std::make_unique<QASymmS8Encoder>(
28  static_cast<int8_t*>(data),
29  info.GetQuantizationScale(),
30  info.GetQuantizationOffset());
31  }
33  {
34  return std::make_unique<QASymm8Encoder>(
35  static_cast<uint8_t*>(data),
36  info.GetQuantizationScale(),
37  info.GetQuantizationOffset());
38  }
39  case DataType::QSymmS8:
40  {
41  if (info.HasPerAxisQuantization())
42  {
43  std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
44  return std::make_unique<QSymm8PerAxisEncoder>(
45  static_cast<int8_t*>(data),
46  params.second,
47  params.first);
48  }
49  else
50  {
51  return std::make_unique<QSymmS8Encoder>(
52  static_cast<int8_t*>(data),
53  info.GetQuantizationScale(),
54  info.GetQuantizationOffset());
55  }
56  }
58  {
59  return std::make_unique<QSymm16Encoder>(
60  static_cast<int16_t*>(data),
61  info.GetQuantizationScale(),
62  info.GetQuantizationOffset());
63  }
65  {
66  return std::make_unique<Int32Encoder>(static_cast<int32_t*>(data));
67  }
69  {
70  return std::make_unique<BFloat16Encoder>(static_cast<armnn::BFloat16*>(data));
71  }
73  {
74  return std::make_unique<Float16Encoder>(static_cast<Half*>(data));
75  }
77  {
78  return std::make_unique<Float32Encoder>(static_cast<float*>(data));
79  }
80  default:
81  {
82  ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
83  break;
84  }
85  }
86  return nullptr;
87 }
88 
89 template<>
90 inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void* data)
91 {
92  switch(info.GetDataType())
93  {
95  {
96  return std::make_unique<BooleanEncoder>(static_cast<uint8_t*>(data));
97  }
98  default:
99  {
100  ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
101  break;
102  }
103  }
104  return nullptr;
105 }
106 
107 template<>
108 inline std::unique_ptr<Encoder<int32_t>> MakeEncoder(const TensorInfo& info, void* data)
109 {
110  switch(info.GetDataType())
111  {
112  case DataType::Signed32:
113  {
114  return std::make_unique<Int32ToInt32tEncoder>(static_cast<int32_t*>(data));
115  }
116  default:
117  {
118  ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
119  break;
120  }
121  }
122  return nullptr;
123 }
124 
125 } //namespace armnn
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:448
std::pair< unsigned int, std::vector< float > > GetPerAxisParams(const armnn::TensorInfo &info)
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
DataType GetDataType() const
Definition: Tensor.hpp:198
std::unique_ptr< Encoder< T > > MakeEncoder(const TensorInfo &info, void *data=nullptr)
Definition: Encoders.hpp:21
half_float::half Half
Definition: Half.hpp:18