From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 20.02/_decoders_8hpp_source.xhtml | 137 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100644 20.02/_decoders_8hpp_source.xhtml

diff --git a/20.02/_decoders_8hpp_source.xhtml b/20.02/_decoders_8hpp_source.xhtml
new file mode 100644
index 0000000000..90ea5c7d5b
--- /dev/null
+++ b/20.02/_decoders_8hpp_source.xhtml
@@ -0,0 +1,137 @@
ArmNN: src/backends/reference/workloads/Decoders.hpp Source File
Decoders.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once
#include "BaseIterator.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <boost/assert.hpp>
namespace armnn
{

namespace
{

inline std::unique_ptr<Decoder<float>> MakeSigned32PerAxisDecoder(const TensorInfo& info, const void* data)
{
    auto params = armnnUtils::GetPerAxisParams(info);
    return std::make_unique<ScaledInt32PerAxisDecoder>(
        static_cast<const int32_t*>(data),
        params.second,
        params.first);
}

inline std::unique_ptr<Decoder<float>> MakeSigned32Decoder(const TensorInfo& info, const void* data)
{
    if(info.HasMultipleQuantizationScales())
    {
        // NOTE: If we have multiple quantization scales, we create a ScaledInt32PerAxisDecoder.
        // This will be used to decode per-axis quantized convolution biases.
        return MakeSigned32PerAxisDecoder(info, data);
    }
    else
    {
        if (info.GetQuantizationDim().has_value())
        {
            // NOTE: Even though we only have a single quantization scale, if the quantization
            // dimension is set, the tensor has per-axis quantization and we need to create a
            // ScaledInt32PerAxisDecoder.
            return MakeSigned32PerAxisDecoder(info, data);
        }

        const float scale = info.GetQuantizationScale();
        if (scale == 0.f)
        {
            // NOTE: If no quantization scale is set, we create an Int32Decoder, which simply
            // casts the int value to float. This will be used for any INT32 data other than
            // convolution biases.
            return std::make_unique<Int32Decoder>(static_cast<const int32_t*>(data));
        }

        // NOTE: If we only have a single (non-zero) quantization scale and no quantization
        // dimension is specified, we need to create a ScaledInt32Decoder. This will be used
        // to decode per-tensor quantized convolution biases.
        return std::make_unique<ScaledInt32Decoder>(static_cast<const int32_t*>(data), scale);
    }
}

} // anonymous namespace

template<typename T>
inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);

template<>
inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
{
    switch(info.GetDataType())
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        case DataType::QuantizedSymm8PerAxis:
        {
            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
            return std::make_unique<QSymm8PerAxisDecoder>(
                static_cast<const int8_t*>(data),
                params.second,
                params.first);
        }
        ARMNN_NO_DEPRECATE_WARN_END
        case DataType::QAsymmS8:
        {
            return std::make_unique<QASymmS8Decoder>(
                static_cast<const int8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::QAsymmU8:
        {
            return std::make_unique<QASymm8Decoder>(
                static_cast<const uint8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::QSymmS16:
        {
            return std::make_unique<QSymm16Decoder>(
                static_cast<const int16_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::BFloat16:
        {
            return std::make_unique<BFloat16Decoder>(static_cast<const BFloat16*>(data));
        }
        case DataType::Float16:
        {
            return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
        }
        case DataType::Float32:
        {
            return std::make_unique<Float32Decoder>(static_cast<const float*>(data));
        }
        case DataType::Signed32:
        {
            return MakeSigned32Decoder(info, data);
        }
        case DataType::QSymmS8:
        {
            if (info.HasPerAxisQuantization())
            {
                std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
                return std::make_unique<QSymm8PerAxisDecoder>(
                    static_cast<const int8_t*>(data),
                    params.second,
                    params.first);
            }
            else
            {
                return std::make_unique<QSymmS8Decoder>(
                    static_cast<const int8_t*>(data),
                    info.GetQuantizationScale(),
                    info.GetQuantizationOffset());
            }
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
            break;
        }
    }
    return nullptr;
}

} // namespace armnn
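
The Signed32 branch above picks between three dequantization schemes for INT32 data (typically convolution biases). As a rough illustration of the arithmetic each decoder applies — hypothetical values, and a sketch only, since the decoder implementations live in BaseIterator.hpp rather than in this file:

#include <cstdint>
#include <vector>

// Illustrative sketch, not ArmNN code: the arithmetic implied by each Signed32 decoder.
void Signed32SchemesSketch()
{
    int32_t q = 50; // a raw INT32 value, e.g. one bias element

    // Int32Decoder (quantization scale == 0.f): plain cast, no scaling.
    float plain = static_cast<float>(q);                      // 50.0f

    // ScaledInt32Decoder (single per-tensor scale):
    float perTensor = static_cast<float>(q) * 0.25f;          // 12.5f

    // ScaledInt32PerAxisDecoder (one scale per index along the quantization axis):
    std::vector<float> scales = { 0.5f, 0.1f };
    unsigned int channel = 1;
    float perAxis = static_cast<float>(q) * scales[channel];  // 5.0f

    (void)plain; (void)perTensor; (void)perAxis; // silence unused-variable warnings
}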
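
For context, a minimal usage sketch of the factory itself (illustrative only, not part of this commit). It assumes the Decoder<T> interface declared in BaseIterator.hpp exposes Get() to decode the current element and prefix operator++ to advance, as the reference workloads use it; SumElements is a hypothetical helper:

#include <memory>

#include <armnn/Tensor.hpp>

#include "Decoders.hpp"

// Hypothetical helper: decode every element of a tensor to float and sum them.
float SumElements(const armnn::TensorInfo& info, const void* data)
{
    std::unique_ptr<armnn::Decoder<float>> decoder = armnn::MakeDecoder<float>(info, data);

    float sum = 0.0f;
    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
    {
        sum += decoder->Get(); // decodes the current element (q8/q16/int32/fp16/...) to float
        ++(*decoder);          // advance the underlying iterator by one element
    }
    return sum;
}

Because every quantization scheme is hidden behind Decoder<float>, a reference workload written against this interface works unchanged for all of the data types handled in the switch above.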