ArmNN
 21.11
BaseIterator.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <ResolveType.hpp>

#include <vector>
16 
17 namespace armnn
18 {
19 
21 {
22 public:
24 
25  virtual ~BaseIterator() {}
26 
27  virtual BaseIterator& operator++() = 0;
28 
29  virtual BaseIterator& operator+=(const unsigned int increment) = 0;
30 
31  virtual BaseIterator& operator-=(const unsigned int increment) = 0;
32 
33  virtual BaseIterator& operator[](const unsigned int index) = 0;
34 };
35 
/// Read-side iterator interface: converts each stored element to IType on Get().
template<typename IType>
class Decoder : public BaseIterator
{
public:
    Decoder() {}

    virtual ~Decoder() {}

    /// Rebinds the iterator to a new underlying buffer and resets its position.
    virtual void Reset(void*) = 0;

    /// Returns the element at the current position, converted to IType.
    virtual IType Get() const = 0;

    /// Decodes the whole tensor described by tensorShape into a float vector.
    /// @param tensorShape Used to derive the element count (and, in per-axis
    ///        implementations, the per-channel step layout).
    /// @param isDepthwise Hint consumed only by per-axis implementations
    ///        (see ScaledInt32PerAxisDecoder); per-tensor decoders ignore it.
    virtual std::vector<float>
    DecodeTensor(const TensorShape &tensorShape,
                 bool isDepthwise = false) = 0;
};
52 
/// Write-side iterator interface: converts an IType value to the stored
/// representation on Set(), and back to IType on Get().
template<typename IType>
class Encoder : public BaseIterator
{
public:
    Encoder() {}

    virtual ~Encoder() {}

    /// Rebinds the iterator to a new underlying buffer and resets its position.
    virtual void Reset(void*) = 0;

    /// Writes `right` (converted to the stored type) at the current position.
    virtual void Set(IType right) = 0;

    /// Returns the element at the current position, converted to IType.
    virtual IType Get() const = 0;
};
67 
/// Concrete per-tensor iterator over a flat buffer of T, implementing the
/// traversal interface of Base (a Decoder<> or Encoder<> specialization).
/// NOTE(review): the extraction dropped the operator++ signature and the
/// protected member declarations; both restored here.
template<typename T, typename Base>
class TypedIterator : public Base
{
public:
    TypedIterator(T* data = nullptr)
        : m_Iterator(data), m_Start(data)
    {}

    /// Rebinds to a new buffer; both the cursor and the start pointer move.
    void Reset(void* data) override
    {
        m_Iterator = reinterpret_cast<T*>(data);
        m_Start = m_Iterator;
    }

    TypedIterator& operator++() override
    {
        ARMNN_ASSERT(m_Iterator);
        ++m_Iterator;
        return *this;
    }

    TypedIterator& operator+=(const unsigned int increment) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator += increment;
        return *this;
    }

    TypedIterator& operator-=(const unsigned int increment) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator -= increment;
        return *this;
    }

    /// Random access: jumps to `index` relative to the buffer start.
    TypedIterator& operator[](const unsigned int index) override
    {
        ARMNN_ASSERT(m_Iterator);
        m_Iterator = m_Start + index;
        return *this;
    }

protected:
    T* m_Iterator; // current position
    T* m_Start;    // start of the bound buffer (anchor for operator[])
};
114 
115 class QASymm8Decoder : public TypedIterator<const uint8_t, Decoder<float>>
116 {
117 public:
118  QASymm8Decoder(const uint8_t* data, const float scale, const int32_t offset)
119  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
120 
121  QASymm8Decoder(const float scale, const int32_t offset)
122  : QASymm8Decoder(nullptr, scale, offset) {}
123 
124  float Get() const override
125  {
126  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
127  }
128  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
129  const bool isDepthwise) override
130  {
131  IgnoreUnused(isDepthwise);
132 
133  const unsigned int size = tensorShape.GetNumElements();
134  std::vector<float> decodedTensor;
135  decodedTensor.reserve(size);
136 
137  for (uint32_t i = 0; i < size; ++i)
138  {
139  this->operator[](i);
140  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
141  }
142 
143  return decodedTensor;
144  }
145 
146 private:
147 
148  const float m_Scale;
149  const int32_t m_Offset;
150 };
151 
152 class QASymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
153 {
154 public:
155  QASymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
156  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
157 
158  QASymmS8Decoder(const float scale, const int32_t offset)
159  : QASymmS8Decoder(nullptr, scale, offset) {}
160 
161  float Get() const override
162  {
163  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
164  }
165  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
166  const bool isDepthwise) override
167  {
168  IgnoreUnused(isDepthwise);
169 
170  const unsigned int size = tensorShape.GetNumElements();
171  std::vector<float> decodedTensor;
172  decodedTensor.reserve(size);
173 
174  for (uint32_t i = 0; i < size; ++i)
175  {
176  this->operator[](i);
177  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
178  }
179 
180  return decodedTensor;
181  }
182 
183 private:
184  const float m_Scale;
185  const int32_t m_Offset;
186 
187 };
188 
/// Decoder for 8-bit symmetric signed quantized data (QSymmS8): dequantizes
/// int8 elements to float via armnn::Dequantize with a per-tensor scale
/// (an offset is carried through, though symmetric data typically uses 0).
class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
{
public:
    QSymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
        : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}

    /// Buffer-less variant; Reset() must supply the data pointer before use.
    QSymmS8Decoder(const float scale, const int32_t offset)
        : QSymmS8Decoder(nullptr, scale, offset) {}

    /// Dequantizes the element at the current iterator position.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
    }
    /// Dequantizes all tensorShape.GetNumElements() elements; isDepthwise is unused.
    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
                                     const bool isDepthwise) override
    {
        IgnoreUnused(isDepthwise);

        const unsigned int size = tensorShape.GetNumElements();
        std::vector<float> decodedTensor;
        decodedTensor.reserve(size);

        for (uint32_t i = 0; i < size; ++i)
        {
            this->operator[](i);
            decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
        }

        return decodedTensor;
    }

private:
    const float m_Scale;
    const int32_t m_Offset;

};
225 
/// Decoder for 16-bit symmetric quantized data (QSymmS16): dequantizes int16
/// elements to float via armnn::Dequantize with a per-tensor scale and offset.
class QSymm16Decoder : public TypedIterator<const int16_t, Decoder<float>>
{
public:
    QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
        : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}

    /// Buffer-less variant; Reset() must supply the data pointer before use.
    QSymm16Decoder(const float scale, const int32_t offset)
        : QSymm16Decoder(nullptr, scale, offset) {}

    /// Dequantizes the element at the current iterator position.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
    }
    /// Dequantizes all tensorShape.GetNumElements() elements; isDepthwise is unused.
    std::vector<float> DecodeTensor (const TensorShape& tensorShape,
                                     const bool isDepthwise) override
    {
        IgnoreUnused(isDepthwise);

        const unsigned int size = tensorShape.GetNumElements();
        std::vector<float> decodedTensor;
        decodedTensor.reserve(size);

        for (uint32_t i = 0; i < size; ++i)
        {
            this->operator[](i);
            decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
        }

        return decodedTensor;
    }

private:
    const float m_Scale;
    const int32_t m_Offset;

};
262 
263 class BFloat16Decoder : public TypedIterator<const BFloat16, Decoder<float>>
264 {
265 public:
267  : TypedIterator(data) {}
268 
270  : BFloat16Decoder(nullptr) {}
271 
272  float Get() const override
273  {
274  float val = 0.f;
276  return val;
277  }
278  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
279  const bool isDepthwise) override
280  {
281  IgnoreUnused(isDepthwise);
282 
283  const unsigned int size = tensorShape.GetNumElements();
284  std::vector<float> decodedTensor;
285  decodedTensor.reserve(size);
286 
287  for (uint32_t i = 0; i < size; ++i)
288  {
289  this->operator[](i);
290 
291  float val = 0.f;
293  decodedTensor.emplace_back(val);
294  }
295 
296  return decodedTensor;
297  }
298 
299 };
300 
301 class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
302 {
303 public:
304  Float16Decoder(const Half* data)
305  : TypedIterator(data) {}
306 
308  : Float16Decoder(nullptr) {}
309 
310  float Get() const override
311  {
312  float val = 0.f;
314  return val;
315  }
316  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
317  const bool isDepthwise) override
318  {
319  IgnoreUnused(isDepthwise);
320 
321  const unsigned int size = tensorShape.GetNumElements();
322  std::vector<float> decodedTensor;
323  decodedTensor.reserve(size);
324 
325  for (uint32_t i = 0; i < size; ++i)
326  {
327  float val = 0.f;
328  this->operator[](i);
330  decodedTensor.emplace_back(val);
331  }
332 
333  return decodedTensor;
334  }
335 
336 
337 };
338 
339 class Float32Decoder : public TypedIterator<const float, Decoder<float>>
340 {
341 public:
342  Float32Decoder(const float* data)
343  : TypedIterator(data) {}
344 
346  : Float32Decoder(nullptr) {}
347 
348  float Get() const override
349  {
350  return *m_Iterator;
351  }
352  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
353  const bool isDepthwise) override
354  {
355  IgnoreUnused(isDepthwise);
356  const unsigned int size = tensorShape.GetNumElements();
357  std::vector<float> decodedTensor;
358 
359  decodedTensor.reserve(size);
360  decodedTensor.assign(m_Start, m_Start + size);
361 
362  return decodedTensor;
363  }
364 };
365 
366 class ScaledInt32Decoder : public TypedIterator<const int32_t, Decoder<float>>
367 {
368 public:
369  ScaledInt32Decoder(const int32_t* data, const float scale)
370  : TypedIterator(data), m_Scale(scale) {}
371 
372  ScaledInt32Decoder(const float scale)
373  : ScaledInt32Decoder(nullptr, scale) {}
374 
375  float Get() const override
376  {
377  return static_cast<float>(*m_Iterator) * m_Scale;
378  }
379  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
380  const bool isDepthwise) override
381  {
382  IgnoreUnused(isDepthwise);
383 
384  const unsigned int size = tensorShape.GetNumElements();
385  std::vector<float> decodedTensor;
386  decodedTensor.reserve(size);
387 
388  for (uint32_t i = 0; i < size; ++i)
389  {
390  this->operator[](i);
391  decodedTensor.emplace_back(static_cast<float>(*m_Iterator) * m_Scale);
392  }
393 
394  return decodedTensor;
395  }
396 
397 private:
398  const float m_Scale;
399 
400 };
401 
402 class Int32Decoder : public TypedIterator<const int32_t, Decoder<float>>
403 {
404 public:
405  Int32Decoder(const int32_t* data)
406  : TypedIterator(data) {}
407 
409  : Int32Decoder(nullptr) {}
410 
411  float Get() const override
412  {
413  return static_cast<float>(*m_Iterator);
414  }
415  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
416  const bool isDepthwise) override
417  {
418  IgnoreUnused(isDepthwise);
419 
420  const unsigned int size = tensorShape.GetNumElements();
421  std::vector<float> decodedTensor;
422  decodedTensor.reserve(size);
423 
424  for (uint32_t i = 0; i < size; ++i)
425  {
426  this->operator[](i);
427  decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
428  }
429 
430  return decodedTensor;
431  }
432 };
433 
434 class Int32ToInt32tDecoder : public TypedIterator<const int32_t, Decoder<int32_t>>
435 {
436 public:
437  Int32ToInt32tDecoder(const int32_t* data)
438  : TypedIterator(data){}
439 
441  : Int32ToInt32tDecoder(nullptr) {}
442 
443  int32_t Get() const override
444  {
445  return *m_Iterator;
446  }
447  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
448  const bool isDepthwise) override
449  {
450  IgnoreUnused(isDepthwise);
451 
452  const unsigned int size = tensorShape.GetNumElements();
453  std::vector<float> decodedTensor;
454  decodedTensor.reserve(size);
455 
456  for (uint32_t i = 0; i < size; ++i)
457  {
458  this->operator[](i);
459  decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
460  }
461 
462  return decodedTensor;
463  }
464 };
465 
466 class BooleanDecoder : public TypedIterator<const uint8_t, Decoder<float>>
467 {
468 public:
469  BooleanDecoder(const uint8_t* data)
470  : TypedIterator(data) {}
471 
473  : BooleanDecoder(nullptr) {}
474 
475  float Get() const override
476  {
477  return *m_Iterator;
478  }
479  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
480  const bool isDepthwise) override
481  {
482  IgnoreUnused(isDepthwise);
483 
484  const unsigned int size = tensorShape.GetNumElements();
485  std::vector<float> decodedTensor;
486  decodedTensor.reserve(size);
487 
488  for (uint32_t i = 0; i < size; ++i)
489  {
490  this->operator[](i);
491  decodedTensor.emplace_back(*m_Iterator);
492  }
493 
494  return decodedTensor;
495  }
496 };
497 
498 class BooleanDecoderBool : public TypedIterator<const uint8_t, Decoder<bool>>
499 {
500 public:
501  BooleanDecoderBool(const uint8_t* data)
502  : TypedIterator(data) {}
503 
505  : BooleanDecoderBool(nullptr) {}
506 
507  bool Get() const override
508  {
509  return *m_Iterator;
510  }
511 
512  std::vector<float> DecodeTensor(const TensorShape& tensorShape,
513  const bool isDepthwise) override
514  {
515  IgnoreUnused(isDepthwise);
516 
517  const unsigned int size = tensorShape.GetNumElements();
518  std::vector<float> decodedTensor;
519  decodedTensor.reserve(size);
520 
521  for (uint32_t i = 0; i < size; ++i)
522  {
523  this->operator[](i);
524  decodedTensor.emplace_back(*m_Iterator);
525  }
526 
527  return decodedTensor;
528  }
529 };
530 
531 class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
532 {
533 public:
534  QASymm8Encoder(uint8_t* data, const float scale, const int32_t offset)
535  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
536 
537  QASymm8Encoder(const float scale, const int32_t offset)
538  : QASymm8Encoder(nullptr, scale, offset) {}
539 
540  void Set(float right) override
541  {
542  *m_Iterator = armnn::Quantize<uint8_t>(right, m_Scale, m_Offset);
543  }
544 
545  float Get() const override
546  {
547  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
548  }
549 
550 private:
551  const float m_Scale;
552  const int32_t m_Offset;
553 };
554 
/// Encoder for 8-bit asymmetric signed quantized data (QAsymmS8): quantizes
/// float values to int8 on Set() and dequantizes back to float on Get().
class QASymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
{
public:
    QASymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
        : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}

    /// Buffer-less variant; Reset() must supply the data pointer before use.
    QASymmS8Encoder(const float scale, const int32_t offset)
        : QASymmS8Encoder(nullptr, scale, offset) {}

    /// Quantizes `right` and stores it at the current iterator position.
    void Set(float right) override
    {
        *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
    }

    /// Dequantizes the element at the current iterator position.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
    }

private:
    const float m_Scale;
    const int32_t m_Offset;
};
578 
579 class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
580 {
581 public:
582  QSymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
583  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
584 
585  QSymmS8Encoder(const float scale, const int32_t offset)
586  : QSymmS8Encoder(nullptr, scale, offset) {}
587 
588  void Set(float right) override
589  {
590  *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
591  }
592 
593  float Get() const override
594  {
595  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
596  }
597 
598 private:
599  const float m_Scale;
600  const int32_t m_Offset;
601 };
602 
/// Encoder for 16-bit symmetric quantized data (QSymmS16): quantizes float
/// values to int16 on Set() and dequantizes back to float on Get().
class QSymm16Encoder : public TypedIterator<int16_t, Encoder<float>>
{
public:
    QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
        : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}

    /// Buffer-less variant; Reset() must supply the data pointer before use.
    QSymm16Encoder(const float scale, const int32_t offset)
        : QSymm16Encoder(nullptr, scale, offset) {}

    /// Quantizes `right` and stores it at the current iterator position.
    void Set(float right) override
    {
        *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset);
    }

    /// Dequantizes the element at the current iterator position.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
    }

private:
    const float m_Scale;
    const int32_t m_Offset;
};
626 
627 class BFloat16Encoder : public TypedIterator<armnn::BFloat16, Encoder<float>>
628 {
629 public:
631  : TypedIterator(data) {}
632 
634  : BFloat16Encoder(nullptr) {}
635 
636  void Set(float right) override
637  {
639  }
640 
641  float Get() const override
642  {
643  float val = 0.f;
645  return val;
646  }
647 };
648 
649 class Float16Encoder : public TypedIterator<Half, Encoder<float>>
650 {
651 public:
653  : TypedIterator(data) {}
654 
656  : Float16Encoder(nullptr) {}
657 
658  void Set(float right) override
659  {
661  }
662 
663  float Get() const override
664  {
665  float val = 0.f;
667  return val;
668  }
669 };
670 
671 class Float32Encoder : public TypedIterator<float, Encoder<float>>
672 {
673 public:
674  Float32Encoder(float* data)
675  : TypedIterator(data) {}
676 
678  : Float32Encoder(nullptr) {}
679 
680  void Set(float right) override
681  {
682  *m_Iterator = right;
683  }
684 
685  float Get() const override
686  {
687  return *m_Iterator;
688  }
689 };
690 
691 class Int32Encoder : public TypedIterator<int32_t, Encoder<float>>
692 {
693 public:
694  Int32Encoder(int32_t* data)
695  : TypedIterator(data) {}
696 
698  : Int32Encoder(nullptr) {}
699 
700  void Set(float right) override
701  {
702  *m_Iterator = static_cast<int32_t>(right);
703  }
704 
705  float Get() const override
706  {
707  return static_cast<float>(*m_Iterator);
708  }
709 };
710 
711 class Int32ToInt32tEncoder : public TypedIterator<int32_t, Encoder<int32_t>>
712 {
713 public:
714  Int32ToInt32tEncoder(int32_t* data)
715  : TypedIterator(data){}
716 
718  : Int32ToInt32tEncoder(nullptr) {}
719 
720  void Set(int32_t right) override
721  {
722  *m_Iterator = right;
723  }
724 
725  int32_t Get() const override
726  {
727  return *m_Iterator;
728  }
729 };
730 
731 class BooleanEncoder : public TypedIterator<uint8_t, Encoder<bool>>
732 {
733 public:
734  BooleanEncoder(uint8_t* data)
735  : TypedIterator(data) {}
736 
738  : BooleanEncoder(nullptr) {}
739 
740  void Set(bool right) override
741  {
742  *m_Iterator = right;
743  }
744 
745  bool Get() const override
746  {
747  return *m_Iterator;
748  }
749 };
750 
751 /// PerAxisIterator for per-axis quantization. Iterates over a tensor as layed out in memory and keeps track
752 /// of the axis index.
753 template<typename T, typename Base>
754 class PerAxisIterator : public Base
755 {
756 public:
757  PerAxisIterator(T* data = nullptr,
758  unsigned int axisFactor = 0,
759  unsigned int axisDimensionality=0)
760  : m_Iterator(data),
761  m_Start(data),
762  m_AxisIndex(0), // iterates over the dimension of axis
763  m_AxisDimensionality(axisDimensionality), // tensorShape[quantization_dim]
764  m_AxisFactor(axisFactor),
765  m_Index(0)
766  {}
767 
768  PerAxisIterator(T* data = nullptr,
769  const armnn::TensorShape& tensorShape = TensorShape(),
770  const unsigned int axis = 0)
771  : m_Iterator(data),
772  m_Start(data),
773  m_AxisIndex(0),
774  m_Index(0)
775  {
776  m_AxisDimensionality = tensorShape[axis];
777  m_AxisFactor = armnnUtils::GetNumElementsAfter(tensorShape, axis);
778  }
779 
780  void Reset(void* data) override
781  {
782  m_Iterator = reinterpret_cast<T*>(data);
783  m_Start = m_Iterator;
784  m_AxisIndex = 0;
785  m_Index = 0;
786  }
787 
789  {
790  ++m_Index;
791  this -> operator[](m_Index);
792  return *this;
793  }
794 
795  PerAxisIterator& operator+=(const unsigned int increment) override
796  {
797  m_Index += increment;
798  this -> operator[](m_Index);
799  return *this;
800  }
801 
802  PerAxisIterator& operator-=(const unsigned int decrement) override
803  {
804  m_Index -= decrement;
805  this -> operator[](m_Index);
806  return *this;
807  }
808 
809 
810  inline PerAxisIterator& SetIndexOnMem(const unsigned int index)
811  {
812  ARMNN_ASSERT(m_Iterator);
813  m_Iterator = m_Start + index;
814  if (index < m_AxisFactor)
815  {
816  m_AxisIndex = 0;
817  }
818  else
819  {
820  m_AxisIndex = (index / m_AxisFactor) % m_AxisDimensionality;
821  }
822  m_Index = index;
823  return *this;
824  }
825 
826  PerAxisIterator& operator[](const unsigned int index) override
827  {
828  SetIndexOnMem(index);
829  return *this;
830  }
831 
832  protected:
835  unsigned int m_AxisIndex;
836  unsigned int m_AxisDimensionality; // tensorShape[quantization_dim]
837  unsigned int m_AxisFactor;
838  unsigned int m_Index;
839 };
840 
/// Per-axis decoder for 8-bit symmetric quantized data: each element is
/// dequantized with the scale belonging to its channel (m_AxisIndex), offset 0.
class QSymm8PerAxisDecoder : public PerAxisIterator<const int8_t, Decoder<float>>
{
public:
    QSymm8PerAxisDecoder(const int8_t* data, const armnn::TensorInfo& tensorInfo)
        : PerAxisIterator(data, tensorInfo.GetShape(), tensorInfo.GetQuantizationDim().value()),
          m_Scales(tensorInfo.GetQuantizationScales())
    {}

    /// Dequantizes the current element with its channel's scale and zero offset.
    float Get() const override
    {
        return armnn::Dequantize(*m_Iterator, GetScale(), 0);
    }

    // Get scale of the current value
    float GetScale() const
    {
        return m_Scales[m_AxisIndex];
    }

    /// Dequantizes the whole tensor, tracking the axis index per element via
    /// SetIndexOnMem; isDepthwise is unused here.
    std::vector<float> DecodeTensor(const TensorShape &tensorShape,
                                    bool isDepthwise) override
    {
        IgnoreUnused(isDepthwise);

        const unsigned int size = tensorShape.GetNumElements();
        std::vector<float> decodedTensor;
        decodedTensor.reserve(size);

        for (uint32_t i = 0; i < size; ++i)
        {
            SetIndexOnMem(i);
            decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, GetScale(), 0));
        }
        return decodedTensor;
    }

private:
    std::vector<float> m_Scales;
};
880 
881 class QSymm8PerAxisEncoder : public PerAxisIterator<int8_t, Encoder<float>>
882 {
883 public:
884  QSymm8PerAxisEncoder(int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
885  : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
886 
887  void Set(float right)
888  {
889  *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale[m_AxisIndex], 0);
890  }
891 
892  float Get() const
893  {
894  return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
895  }
896 
897  // Get scale of the current value
898  float GetScale() const
899  {
900  return m_Scale[m_AxisIndex];
901  }
902 
903 private:
904  std::vector<float> m_Scale;
905 };
906 
907 class ScaledInt32PerAxisDecoder : public PerAxisIterator<const int32_t, Decoder<float>>
908 {
909 public:
910  ScaledInt32PerAxisDecoder(const int32_t* data, const armnn::TensorInfo tensorInfo)
911  : PerAxisIterator(data, tensorInfo.GetShape(), tensorInfo.GetQuantizationDim().value()),
912  m_Scales(tensorInfo.GetQuantizationScales())
913  {}
914 
915  float Get() const override
916  {
917  return armnn::Dequantize(*m_Iterator, m_Scales[m_AxisIndex], 0);
918  }
919 
920  // Get scale of the current value
921  float GetScale() const
922  {
923  return m_Scales[m_AxisIndex];
924  }
925 
926  std::vector<float> DecodeTensor(const TensorShape &tensorShape,
927  bool isDepthwise) override
928  {
929  const uint32_t size = tensorShape.GetNumElements();
930 
931  const uint32_t stepSize = isDepthwise ?
932  tensorShape[2] * tensorShape[3] : tensorShape.GetNumElements() / tensorShape[0];
933 
934  const uint32_t stepNum = size / stepSize;
935 
936  std::vector<float> decodedTensor;
937  decodedTensor.reserve(size);
938 
939  // channelMultiplier is only used in depthwise convolutions and in other cases will have no effect
940  // stepSize is the length of a contiguous area sharing a quantization scale within a tensor
941  // stepNum is the number of those steps/blocks in the tensor
942  for (uint32_t step = 0; step < stepNum; ++step)
943  {
944  //scale = (channelMultiplier * step + mult) % scaleSize;
945  for (uint32_t i = 0; i < stepSize; ++i)
946  {
947  unsigned int index = step * stepSize + i;
948  this->operator[](index);
949  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scales[step], 0));
950  }
951  }
952  return decodedTensor;
953  }
954 
955 private:
956  std::vector<float> m_Scales;
957 };
958 
959 } // namespace armnn
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified...
Definition: Tensor.cpp:181
PerAxisIterator & operator++() override
Float32Decoder(const float *data)
void Set(float right) override
ScaledInt32Decoder(const float scale)
void Set(int32_t right) override
BFloat16Decoder(const BFloat16 *data)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
virtual BaseIterator & operator-=(const unsigned int increment)=0
PerAxisIterator for per-axis quantization.
float Get() const override
PerAxisIterator(T *data=nullptr, const armnn::TensorShape &tensorShape=TensorShape(), const unsigned int axis=0)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
PerAxisIterator & operator[](const unsigned int index) override
QSymmS8Decoder(const float scale, const int32_t offset)
float Get() const override
QSymm16Encoder(const float scale, const int32_t offset)
static void ConvertBFloat16ToFloat32(const void *srcBFloat16Buffer, size_t numElements, float *dstFloat32Buffer)
void Reset(void *data) override
Int32ToInt32tEncoder(int32_t *data)
void Set(float right) override
ScaledInt32Decoder(const int32_t *data, const float scale)
int32_t Get() const override
QSymm16Decoder(const int16_t *data, const float scale, const int32_t offset)
BooleanDecoderBool(const uint8_t *data)
void Set(bool right) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, bool isDepthwise) override
QSymm8PerAxisDecoder(const int8_t *data, const armnn::TensorInfo &tensorInfo)
float Get() const override
QSymmS8Encoder(int8_t *data, const float scale, const int32_t offset)
float Get() const override
BFloat16Encoder(armnn::BFloat16 *data)
void Set(float right) override
QSymmS8Decoder(const int8_t *data, const float scale, const int32_t offset)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
float Get() const override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
void Set(float right) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
float Get() const override
float Get() const override
QASymmS8Decoder(const int8_t *data, const float scale, const int32_t offset)
float Get() const override
unsigned int m_AxisDimensionality
Int32Decoder(const int32_t *data)
float Get() const override
virtual BaseIterator & operator[](const unsigned int index)=0
TypedIterator & operator[](const unsigned int index) override
QASymmS8Decoder(const float scale, const int32_t offset)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
QASymm8Encoder(const float scale, const int32_t offset)
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16, and stores in the given dstFloat16Buffer.
float Get() const override
QSymmS8Encoder(const float scale, const int32_t offset)
ScaledInt32PerAxisDecoder(const int32_t *data, const armnn::TensorInfo tensorInfo)
Int32Encoder(int32_t *data)
void Set(float right) override
void Reset(void *data) override
virtual ~Decoder()
virtual BaseIterator & operator++()=0
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
float Get() const override
QASymm8Encoder(uint8_t *data, const float scale, const int32_t offset)
float Get() const override
static void ConvertFloat16To32(const void *srcFloat16Buffer, size_t numElements, float *dstFloat32Buffer)
int32_t Get() const override
BooleanEncoder(uint8_t *data)
float Get() const override
bool Get() const override
PerAxisIterator(T *data=nullptr, unsigned int axisFactor=0, unsigned int axisDimensionality=0)
float Get() const override
float Get() const override
Float16Encoder(Half *data)
QSymm8PerAxisEncoder(int8_t *data, const std::vector< float > &scale, unsigned int axisFactor)
QASymm8Decoder(const float scale, const int32_t offset)
Int32ToInt32tDecoder(const int32_t *data)
TypedIterator & operator++() override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
Float16Decoder(const Half *data)
float Get() const override
float Get() const override
QASymmS8Encoder(const float scale, const int32_t offset)
PerAxisIterator & operator-=(const unsigned int decrement) override
virtual BaseIterator & operator+=(const unsigned int increment)=0
static void ConvertFloat32ToBFloat16(const float *srcFloat32Buffer, size_t numElements, void *dstBFloat16Buffer)
void Set(float right) override
QSymm16Decoder(const float scale, const int32_t offset)
Float32Encoder(float *data)
float Get() const override
QASymm8Decoder(const uint8_t *data, const float scale, const int32_t offset)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
TypedIterator & operator+=(const unsigned int increment) override
PerAxisIterator & SetIndexOnMem(const unsigned int index)
virtual ~Encoder()
float Get() const override
QSymm16Encoder(int16_t *data, const float scale, const int32_t offset)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
unsigned int GetNumElementsAfter(const armnn::TensorShape &shape, unsigned int axis)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const bool isDepthwise) override
PerAxisIterator & operator+=(const unsigned int increment) override
void Set(float right) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, bool isDepthwise) override
half_float::half Half
Definition: Half.hpp:18
QASymmS8Encoder(int8_t *data, const float scale, const int32_t offset)
BooleanDecoder(const uint8_t *data)
TypedIterator & operator-=(const unsigned int increment) override
TypedIterator(T *data=nullptr)
void Set(float right) override
bool Get() const override
float Get() const override