ArmNN
 20.11
BaseIterator.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#pragma once

#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>

#include <ResolveType.hpp>

#include <vector>
15 namespace armnn
16 {
17 
19 {
20 public:
22 
23  virtual ~BaseIterator() {}
24 
25  virtual BaseIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) = 0;
26 
27  virtual BaseIterator& operator++() = 0;
28 
29  virtual BaseIterator& operator+=(const unsigned int increment) = 0;
30 
31  virtual BaseIterator& operator-=(const unsigned int increment) = 0;
32 
33  virtual BaseIterator& operator[](const unsigned int index) = 0;
34 };
35 
36 template<typename IType>
37 class Decoder : public BaseIterator
38 {
39 public:
40  Decoder() {}
41 
42  virtual ~Decoder() {}
43 
44  virtual void Reset(void*) = 0;
45 
46  virtual IType Get() const = 0;
47 
48  virtual std::vector<float>
49  DecodeTensor(const TensorShape &tensorShape,
50  const unsigned int channelMultiplier = 1,
51  bool isDepthwise = false) = 0;
52 };
53 
54 template<typename IType>
55 class Encoder : public BaseIterator
56 {
57 public:
58  Encoder() {}
59 
60  virtual ~Encoder() {}
61 
62  virtual void Reset(void*) = 0;
63 
64  virtual void Set(IType right) = 0;
65 
66  virtual IType Get() const = 0;
67 };
68 
69 template<typename T, typename Base>
70 class TypedIterator : public Base
71 {
72 public:
73  TypedIterator(T* data = nullptr)
74  : m_Iterator(data), m_Start(data)
75  {}
76 
77  void Reset(void* data) override
78  {
79  m_Iterator = reinterpret_cast<T*>(data);
80  m_Start = m_Iterator;
81  }
82 
84  {
85  ARMNN_ASSERT(m_Iterator);
86  ++m_Iterator;
87  return *this;
88  }
89 
90  TypedIterator& operator+=(const unsigned int increment) override
91  {
92  ARMNN_ASSERT(m_Iterator);
93  m_Iterator += increment;
94  return *this;
95  }
96 
97  TypedIterator& operator-=(const unsigned int increment) override
98  {
99  ARMNN_ASSERT(m_Iterator);
100  m_Iterator -= increment;
101  return *this;
102  }
103 
104  TypedIterator& operator[](const unsigned int index) override
105  {
106  ARMNN_ASSERT(m_Iterator);
107  m_Iterator = m_Start + index;
108  return *this;
109  }
110 
111  TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
112  {
113  IgnoreUnused(axisIndex);
114  ARMNN_ASSERT(m_Iterator);
115  m_Iterator = m_Start + index;
116  return *this;
117  }
118 
119 protected:
122 };
123 
124 class QASymm8Decoder : public TypedIterator<const uint8_t, Decoder<float>>
125 {
126 public:
127  QASymm8Decoder(const uint8_t* data, const float scale, const int32_t offset)
128  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
129 
130  QASymm8Decoder(const float scale, const int32_t offset)
131  : QASymm8Decoder(nullptr, scale, offset) {}
132 
133  float Get() const override
134  {
135  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
136  }
137  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
138  const unsigned int channelMultiplier,
139  const bool isDepthwise) override
140  {
141  IgnoreUnused(channelMultiplier, isDepthwise);
142 
143  const unsigned int size = tensorShape.GetNumElements();
144  std::vector<float> decodedTensor;
145  decodedTensor.reserve(size);
146 
147  for (uint32_t i = 0; i < size; ++i)
148  {
149  this->operator[](i);
150  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
151  }
152 
153  return decodedTensor;
154  }
155 
156 private:
157 
158  const float m_Scale;
159  const int32_t m_Offset;
160 };
161 
162 class QASymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
163 {
164 public:
165  QASymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
166  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
167 
168  QASymmS8Decoder(const float scale, const int32_t offset)
169  : QASymmS8Decoder(nullptr, scale, offset) {}
170 
171  float Get() const override
172  {
173  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
174  }
175  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
176  const unsigned int channelMultiplier,
177  const bool isDepthwise) override
178  {
179  IgnoreUnused(channelMultiplier, isDepthwise);
180 
181  const unsigned int size = tensorShape.GetNumElements();
182  std::vector<float> decodedTensor;
183  decodedTensor.reserve(size);
184 
185  for (uint32_t i = 0; i < size; ++i)
186  {
187  this->operator[](i);
188  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
189  }
190 
191  return decodedTensor;
192  }
193 
194 private:
195  const float m_Scale;
196  const int32_t m_Offset;
197 
198 };
199 
200 class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
201 {
202 public:
203  QSymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
204  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
205 
206  QSymmS8Decoder(const float scale, const int32_t offset)
207  : QSymmS8Decoder(nullptr, scale, offset) {}
208 
209  float Get() const override
210  {
211  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
212  }
213  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
214  const unsigned int channelMultiplier,
215  const bool isDepthwise) override
216  {
217  IgnoreUnused(channelMultiplier, isDepthwise);
218 
219  const unsigned int size = tensorShape.GetNumElements();
220  std::vector<float> decodedTensor;
221  decodedTensor.reserve(size);
222 
223  for (uint32_t i = 0; i < size; ++i)
224  {
225  this->operator[](i);
226  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
227  }
228 
229  return decodedTensor;
230  }
231 
232 private:
233  const float m_Scale;
234  const int32_t m_Offset;
235 
236 };
237 
238 class QSymm16Decoder : public TypedIterator<const int16_t, Decoder<float>>
239 {
240 public:
241  QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
242  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
243 
244  QSymm16Decoder(const float scale, const int32_t offset)
245  : QSymm16Decoder(nullptr, scale, offset) {}
246 
247  float Get() const override
248  {
249  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
250  }
251  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
252  const unsigned int channelMultiplier,
253  const bool isDepthwise) override
254  {
255  IgnoreUnused(channelMultiplier, isDepthwise);
256 
257  const unsigned int size = tensorShape.GetNumElements();
258  std::vector<float> decodedTensor;
259  decodedTensor.reserve(size);
260 
261  for (uint32_t i = 0; i < size; ++i)
262  {
263  this->operator[](i);
264  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scale, m_Offset));
265  }
266 
267  return decodedTensor;
268  }
269 
270 private:
271  const float m_Scale;
272  const int32_t m_Offset;
273 
274 };
275 
276 class BFloat16Decoder : public TypedIterator<const BFloat16, Decoder<float>>
277 {
278 public:
280  : TypedIterator(data) {}
281 
283  : BFloat16Decoder(nullptr) {}
284 
285  float Get() const override
286  {
287  float val = 0.f;
289  return val;
290  }
291  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
292  const unsigned int channelMultiplier,
293  const bool isDepthwise) override
294  {
295  IgnoreUnused(channelMultiplier, isDepthwise);
296 
297  const unsigned int size = tensorShape.GetNumElements();
298  std::vector<float> decodedTensor;
299  decodedTensor.reserve(size);
300 
301  for (uint32_t i = 0; i < size; ++i)
302  {
303  this->operator[](i);
304 
305  float val = 0.f;
307  decodedTensor.emplace_back(val);
308  }
309 
310  return decodedTensor;
311  }
312 
313 };
314 
315 class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
316 {
317 public:
318  Float16Decoder(const Half* data)
319  : TypedIterator(data) {}
320 
322  : Float16Decoder(nullptr) {}
323 
324  float Get() const override
325  {
326  float val = 0.f;
328  return val;
329  }
330  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
331  const unsigned int channelMultiplier,
332  const bool isDepthwise) override
333  {
334  IgnoreUnused(channelMultiplier, isDepthwise);
335 
336  const unsigned int size = tensorShape.GetNumElements();
337  std::vector<float> decodedTensor;
338  decodedTensor.reserve(size);
339 
340  for (uint32_t i = 0; i < size; ++i)
341  {
342  float val = 0.f;
343  this->operator[](i);
345  decodedTensor.emplace_back(val);
346  }
347 
348  return decodedTensor;
349  }
350 
351 
352 };
353 
354 class Float32Decoder : public TypedIterator<const float, Decoder<float>>
355 {
356 public:
357  Float32Decoder(const float* data)
358  : TypedIterator(data) {}
359 
361  : Float32Decoder(nullptr) {}
362 
363  float Get() const override
364  {
365  return *m_Iterator;
366  }
367  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
368  const unsigned int channelMultiplier,
369  const bool isDepthwise) override
370  {
371  IgnoreUnused(channelMultiplier, isDepthwise);
372  const unsigned int size = tensorShape.GetNumElements();
373  std::vector<float> decodedTensor;
374 
375  decodedTensor.reserve(size);
376  decodedTensor.assign(m_Start, m_Start + size);
377 
378  return decodedTensor;
379  }
380 };
381 
382 class ScaledInt32Decoder : public TypedIterator<const int32_t, Decoder<float>>
383 {
384 public:
385  ScaledInt32Decoder(const int32_t* data, const float scale)
386  : TypedIterator(data), m_Scale(scale) {}
387 
388  ScaledInt32Decoder(const float scale)
389  : ScaledInt32Decoder(nullptr, scale) {}
390 
391  float Get() const override
392  {
393  return static_cast<float>(*m_Iterator) * m_Scale;
394  }
395  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
396  const unsigned int channelMultiplier,
397  const bool isDepthwise) override
398  {
399  IgnoreUnused(channelMultiplier, isDepthwise);
400 
401  const unsigned int size = tensorShape.GetNumElements();
402  std::vector<float> decodedTensor;
403  decodedTensor.reserve(size);
404 
405  for (uint32_t i = 0; i < size; ++i)
406  {
407  this->operator[](i);
408  decodedTensor.emplace_back(static_cast<float>(*m_Iterator) * m_Scale);
409  }
410 
411  return decodedTensor;
412  }
413 
414 private:
415  const float m_Scale;
416 
417 };
418 
419 class Int32Decoder : public TypedIterator<const int32_t, Decoder<float>>
420 {
421 public:
422  Int32Decoder(const int32_t* data)
423  : TypedIterator(data) {}
424 
426  : Int32Decoder(nullptr) {}
427 
428  float Get() const override
429  {
430  return static_cast<float>(*m_Iterator);
431  }
432  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
433  const unsigned int channelMultiplier,
434  const bool isDepthwise) override
435  {
436  IgnoreUnused(channelMultiplier, isDepthwise);
437 
438  const unsigned int size = tensorShape.GetNumElements();
439  std::vector<float> decodedTensor;
440  decodedTensor.reserve(size);
441 
442  for (uint32_t i = 0; i < size; ++i)
443  {
444  this->operator[](i);
445  decodedTensor.emplace_back(static_cast<float>(*m_Iterator));
446  }
447 
448  return decodedTensor;
449  }
450 };
451 
452 class Int32ToInt32tDecoder : public TypedIterator<const int32_t, Decoder<int32_t>>
453 {
454 public:
455  Int32ToInt32tDecoder(const int32_t* data)
456  : TypedIterator(data){}
457 
459  : Int32ToInt32tDecoder(nullptr) {}
460 
461  int32_t Get() const override
462  {
463  return *m_Iterator;
464  }
465  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
466  const unsigned int channelMultiplier,
467  const bool isDepthwise) override
468  {
469  IgnoreUnused(channelMultiplier, isDepthwise);
470 
471  const unsigned int size = tensorShape.GetNumElements();
472  std::vector<float> decodedTensor;
473  decodedTensor.reserve(size);
474 
475  for (uint32_t i = 0; i < size; ++i)
476  {
477  this->operator[](i);
478  decodedTensor.emplace_back(*m_Iterator);
479  }
480 
481  return decodedTensor;
482  }
483 };
484 
485 class BooleanDecoder : public TypedIterator<const uint8_t, Decoder<float>>
486 {
487 public:
488  BooleanDecoder(const uint8_t* data)
489  : TypedIterator(data) {}
490 
492  : BooleanDecoder(nullptr) {}
493 
494  float Get() const override
495  {
496  return *m_Iterator;
497  }
498  std::vector<float> DecodeTensor (const TensorShape& tensorShape,
499  const unsigned int channelMultiplier,
500  const bool isDepthwise) override
501  {
502  IgnoreUnused(channelMultiplier, isDepthwise);
503 
504  const unsigned int size = tensorShape.GetNumElements();
505  std::vector<float> decodedTensor;
506  decodedTensor.reserve(size);
507 
508  for (uint32_t i = 0; i < size; ++i)
509  {
510  this->operator[](i);
511  decodedTensor.emplace_back(*m_Iterator);
512  }
513 
514  return decodedTensor;
515  }
516 };
517 
518 class BooleanDecoderBool : public TypedIterator<const uint8_t, Decoder<bool>>
519 {
520 public:
521  BooleanDecoderBool(const uint8_t* data)
522  : TypedIterator(data) {}
523 
525  : BooleanDecoderBool(nullptr) {}
526 
527  bool Get() const override
528  {
529  return *m_Iterator;
530  }
531 
532  std::vector<float> DecodeTensor(const TensorShape& tensorShape,
533  const unsigned int channelMultiplier,
534  const bool isDepthwise) override
535  {
536  IgnoreUnused(channelMultiplier, isDepthwise);
537 
538  const unsigned int size = tensorShape.GetNumElements();
539  std::vector<float> decodedTensor;
540  decodedTensor.reserve(size);
541 
542  for (uint32_t i = 0; i < size; ++i)
543  {
544  this->operator[](i);
545  decodedTensor.emplace_back(*m_Iterator);
546  }
547 
548  return decodedTensor;
549  }
550 };
551 
552 class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
553 {
554 public:
555  QASymm8Encoder(uint8_t* data, const float scale, const int32_t offset)
556  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
557 
558  QASymm8Encoder(const float scale, const int32_t offset)
559  : QASymm8Encoder(nullptr, scale, offset) {}
560 
561  void Set(float right) override
562  {
563  *m_Iterator = armnn::Quantize<uint8_t>(right, m_Scale, m_Offset);
564  }
565 
566  float Get() const override
567  {
568  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
569  }
570 
571 private:
572  const float m_Scale;
573  const int32_t m_Offset;
574 };
575 
576 class QASymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
577 {
578 public:
579  QASymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
580  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
581 
582  QASymmS8Encoder(const float scale, const int32_t offset)
583  : QASymmS8Encoder(nullptr, scale, offset) {}
584 
585  void Set(float right) override
586  {
587  *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
588  }
589 
590  float Get() const override
591  {
592  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
593  }
594 
595 private:
596  const float m_Scale;
597  const int32_t m_Offset;
598 };
599 
600 class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
601 {
602 public:
603  QSymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
604  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
605 
606  QSymmS8Encoder(const float scale, const int32_t offset)
607  : QSymmS8Encoder(nullptr, scale, offset) {}
608 
609  void Set(float right) override
610  {
611  *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
612  }
613 
614  float Get() const override
615  {
616  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
617  }
618 
619 private:
620  const float m_Scale;
621  const int32_t m_Offset;
622 };
623 
624 class QSymm16Encoder : public TypedIterator<int16_t, Encoder<float>>
625 {
626 public:
627  QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
628  : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
629 
630  QSymm16Encoder(const float scale, const int32_t offset)
631  : QSymm16Encoder(nullptr, scale, offset) {}
632 
633  void Set(float right) override
634  {
635  *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset);
636  }
637 
638  float Get() const override
639  {
640  return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
641  }
642 
643 private:
644  const float m_Scale;
645  const int32_t m_Offset;
646 };
647 
648 class BFloat16Encoder : public TypedIterator<armnn::BFloat16, Encoder<float>>
649 {
650 public:
652  : TypedIterator(data) {}
653 
655  : BFloat16Encoder(nullptr) {}
656 
657  void Set(float right) override
658  {
660  }
661 
662  float Get() const override
663  {
664  float val = 0.f;
666  return val;
667  }
668 };
669 
670 class Float16Encoder : public TypedIterator<Half, Encoder<float>>
671 {
672 public:
674  : TypedIterator(data) {}
675 
677  : Float16Encoder(nullptr) {}
678 
679  void Set(float right) override
680  {
682  }
683 
684  float Get() const override
685  {
686  float val = 0.f;
688  return val;
689  }
690 };
691 
692 class Float32Encoder : public TypedIterator<float, Encoder<float>>
693 {
694 public:
695  Float32Encoder(float* data)
696  : TypedIterator(data) {}
697 
699  : Float32Encoder(nullptr) {}
700 
701  void Set(float right) override
702  {
703  *m_Iterator = right;
704  }
705 
706  float Get() const override
707  {
708  return *m_Iterator;
709  }
710 };
711 
712 class Int32Encoder : public TypedIterator<int32_t, Encoder<float>>
713 {
714 public:
715  Int32Encoder(int32_t* data)
716  : TypedIterator(data) {}
717 
719  : Int32Encoder(nullptr) {}
720 
721  void Set(float right) override
722  {
723  *m_Iterator = static_cast<int32_t>(right);
724  }
725 
726  float Get() const override
727  {
728  return static_cast<float>(*m_Iterator);
729  }
730 };
731 
732 class Int32ToInt32tEncoder : public TypedIterator<int32_t, Encoder<int32_t>>
733 {
734 public:
735  Int32ToInt32tEncoder(int32_t* data)
736  : TypedIterator(data){}
737 
739  : Int32ToInt32tEncoder(nullptr) {}
740 
741  void Set(int32_t right) override
742  {
743  *m_Iterator = right;
744  }
745 
746  int32_t Get() const override
747  {
748  return *m_Iterator;
749  }
750 };
751 
752 class BooleanEncoder : public TypedIterator<uint8_t, Encoder<bool>>
753 {
754 public:
755  BooleanEncoder(uint8_t* data)
756  : TypedIterator(data) {}
757 
759  : BooleanEncoder(nullptr) {}
760 
761  void Set(bool right) override
762  {
763  *m_Iterator = right;
764  }
765 
766  bool Get() const override
767  {
768  return *m_Iterator;
769  }
770 };
771 
772 // PerAxisIterator for per-axis quantization
773 template<typename T, typename Base>
774 class PerAxisIterator : public Base
775 {
776 public:
777  // axisFactor is used to calculate channelStep
778  PerAxisIterator(T* data = nullptr, unsigned int axisFactor = 0)
779  : m_Iterator(data), m_Start(data), m_AxisIndex(0), m_AxisFactor(axisFactor)
780  {}
781 
782  // This should be called to set index for per-axis Encoder/Decoder
783  PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
784  {
785  ARMNN_ASSERT(m_Iterator);
786  m_Iterator = m_Start + index;
787  m_AxisIndex = axisIndex;
788  return *this;
789  }
790 
791  void Reset(void* data) override
792  {
793  m_Iterator = reinterpret_cast<T*>(data);
794  m_Start = m_Iterator;
795  m_AxisIndex = 0;
796  }
797 
799  {
800  ARMNN_ASSERT(m_Iterator);
801  ++m_Iterator;
802  m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
803  return *this;
804  }
805 
806  PerAxisIterator& operator+=(const unsigned int increment) override
807  {
808  ARMNN_ASSERT(m_Iterator);
809  m_Iterator += increment;
810  m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
811  return *this;
812  }
813 
814  PerAxisIterator& operator-=(const unsigned int decrement) override
815  {
816  ARMNN_ASSERT(m_Iterator);
817  m_Iterator -= decrement;
818  m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
819  return *this;
820  }
821 
822  PerAxisIterator& operator[](const unsigned int index) override
823  {
824  ARMNN_ASSERT(m_Iterator);
825  m_Iterator = m_Start + index;
826  m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
827  return *this;
828  }
829 
830  protected:
833  unsigned int m_AxisIndex;
834  unsigned int m_AxisFactor;
835 };
836 
837 class QSymm8PerAxisDecoder : public PerAxisIterator<const int8_t, Decoder<float>>
838 {
839 public:
840  QSymm8PerAxisDecoder(const int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
841  : PerAxisIterator(data, axisFactor), m_Scales(scale) {}
842 
843  float Get() const override
844  {
845  return armnn::Dequantize(*m_Iterator, m_Scales[m_AxisIndex], 0);
846  }
847 
848  // Get scale of the current value
849  float GetScale() const
850  {
851  return m_Scales[m_AxisIndex];
852  }
853 
854  std::vector<float> DecodeTensor(const TensorShape &tensorShape,
855  const unsigned int channelMultiplier,
856  bool isDepthwise) override
857  {
858  const uint32_t size = tensorShape.GetNumElements();
859  const uint32_t scaleSize = static_cast<uint32_t>(m_Scales.size());
860 
861  const uint32_t stepSize = isDepthwise ?
862  tensorShape[2] * tensorShape[3] : tensorShape.GetNumElements() / tensorShape[0];
863 
864  const uint32_t stepNum = size / (stepSize * channelMultiplier);
865  uint32_t scale;
866 
867  std::vector<float> decodedTensor;
868  decodedTensor.reserve(size);
869 
870  // channelMultiplier is only used in depthwise convolutions and in other cases will have no effect
871  // stepSize is the length of a contiguous area sharing a quantization scale within a tensor
872  // stepNum is the number of those steps/blocks in the tensor
873  for (uint32_t mult = 0; mult < channelMultiplier; ++mult)
874  {
875  for (uint32_t step = 0; step < stepNum; ++step)
876  {
877  scale = (channelMultiplier * step + mult) % scaleSize;
878  for (uint32_t i = 0; i < stepSize; ++i)
879  {
880  unsigned int index = mult * stepSize * channelMultiplier +
881  step * stepSize + i;
882  this->operator[](index);
883  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scales[scale], 0));
884  }
885  }
886  }
887  return decodedTensor;
888  }
889 
890 private:
891  std::vector<float> m_Scales;
892 };
893 
894 class QSymm8PerAxisEncoder : public PerAxisIterator<int8_t, Encoder<float>>
895 {
896 public:
897  QSymm8PerAxisEncoder(int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
898  : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
899 
900  void Set(float right)
901  {
902  *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale[m_AxisIndex], 0);
903  }
904 
905  float Get() const
906  {
907  return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
908  }
909 
910  // Get scale of the current value
911  float GetScale() const
912  {
913  return m_Scale[m_AxisIndex];
914  }
915 
916 private:
917  std::vector<float> m_Scale;
918 };
919 
920 class ScaledInt32PerAxisDecoder : public PerAxisIterator<const int32_t, Decoder<float>>
921 {
922 public:
923  ScaledInt32PerAxisDecoder(const int32_t* data, const std::vector<float>& scales, unsigned int axisFactor)
924  : PerAxisIterator(data, axisFactor), m_Scales(scales) {}
925 
926  float Get() const override
927  {
928  return armnn::Dequantize(*m_Iterator, m_Scales[m_AxisIndex], 0);
929  }
930 
931  // Get scale of the current value
932  float GetScale() const
933  {
934  return m_Scales[m_AxisIndex];
935  }
936 
937  std::vector<float> DecodeTensor(const TensorShape &tensorShape,
938  const unsigned int channelMultiplier,
939  bool isDepthwise) override
940  {
941  const uint32_t size = tensorShape.GetNumElements();
942  const uint32_t scaleSize = static_cast<uint32_t>(m_Scales.size());
943 
944  const uint32_t stepSize = isDepthwise ?
945  tensorShape[2] * tensorShape[3] : tensorShape.GetNumElements() / tensorShape[0];
946 
947  const uint32_t stepNum = size / (stepSize * channelMultiplier);
948  uint32_t scale;
949 
950  std::vector<float> decodedTensor;
951  decodedTensor.reserve(size);
952 
953  // channelMultiplier is only used in depthwise convolutions and in other cases will have no effect
954  // stepSize is the length of a contiguous area sharing a quantization scale within a tensor
955  // stepNum is the number of those steps/blocks in the tensor
956  for (uint32_t mult = 0; mult < channelMultiplier; ++mult)
957  {
958  for (uint32_t step = 0; step < stepNum; ++step)
959  {
960  scale = (channelMultiplier * step + mult) % scaleSize;
961  for (uint32_t i = 0; i < stepSize; ++i)
962  {
963  unsigned int index = mult * stepSize * channelMultiplier +
964  step * stepSize + i;
965  this->operator[](index);
966  decodedTensor.emplace_back(armnn::Dequantize(*m_Iterator, m_Scales[scale], 0));
967  }
968  }
969  }
970  return decodedTensor;
971  }
972 
973 private:
974  std::vector<float> m_Scales;
975 };
976 
977 } // namespace armnn
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified...
Definition: Tensor.cpp:181
PerAxisIterator & operator++() override
Float32Decoder(const float *data)
QSymm8PerAxisDecoder(const int8_t *data, const std::vector< float > &scale, unsigned int axisFactor)
void Set(float right) override
ScaledInt32Decoder(const float scale)
void Set(int32_t right) override
BFloat16Decoder(const BFloat16 *data)
PerAxisIterator(T *data=nullptr, unsigned int axisFactor=0)
virtual BaseIterator & operator-=(const unsigned int increment)=0
float Get() const override
PerAxisIterator & operator[](const unsigned int index) override
QSymmS8Decoder(const float scale, const int32_t offset)
float Get() const override
QSymm16Encoder(const float scale, const int32_t offset)
static void ConvertBFloat16ToFloat32(const void *srcBFloat16Buffer, size_t numElements, float *dstFloat32Buffer)
void Reset(void *data) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, bool isDepthwise) override
Int32ToInt32tEncoder(int32_t *data)
void Set(float right) override
ScaledInt32Decoder(const int32_t *data, const float scale)
int32_t Get() const override
QSymm16Decoder(const int16_t *data, const float scale, const int32_t offset)
BooleanDecoderBool(const uint8_t *data)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
void Set(bool right) override
float Get() const override
QSymmS8Encoder(int8_t *data, const float scale, const int32_t offset)
ScaledInt32PerAxisDecoder(const int32_t *data, const std::vector< float > &scales, unsigned int axisFactor)
float Get() const override
BFloat16Encoder(armnn::BFloat16 *data)
void Set(float right) override
QSymmS8Decoder(const int8_t *data, const float scale, const int32_t offset)
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
float Get() const override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
void Set(float right) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, bool isDepthwise) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
float Get() const override
float Get() const override
QASymmS8Decoder(const int8_t *data, const float scale, const int32_t offset)
float Get() const override
Int32Decoder(const int32_t *data)
float Get() const override
virtual BaseIterator & operator[](const unsigned int index)=0
TypedIterator & operator[](const unsigned int index) override
QASymmS8Decoder(const float scale, const int32_t offset)
QASymm8Encoder(const float scale, const int32_t offset)
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16, and stores in the given dstFloat16Buffer.
float Get() const override
QSymmS8Encoder(const float scale, const int32_t offset)
Int32Encoder(int32_t *data)
void Set(float right) override
void Reset(void *data) override
virtual ~Decoder()
virtual BaseIterator & operator++()=0
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
float Get() const override
QASymm8Encoder(uint8_t *data, const float scale, const int32_t offset)
float Get() const override
static void ConvertFloat16To32(const void *srcFloat16Buffer, size_t numElements, float *dstFloat32Buffer)
int32_t Get() const override
BooleanEncoder(uint8_t *data)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
float Get() const override
bool Get() const override
float Get() const override
float Get() const override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
Float16Encoder(Half *data)
QSymm8PerAxisEncoder(int8_t *data, const std::vector< float > &scale, unsigned int axisFactor)
QASymm8Decoder(const float scale, const int32_t offset)
Int32ToInt32tDecoder(const int32_t *data)
TypedIterator & operator++() override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
Float16Decoder(const Half *data)
virtual BaseIterator & SetIndex(unsigned int index, unsigned int axisIndex=0)=0
float Get() const override
float Get() const override
PerAxisIterator & SetIndex(unsigned int index, unsigned int axisIndex) override
QASymmS8Encoder(const float scale, const int32_t offset)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
PerAxisIterator & operator-=(const unsigned int decrement) override
virtual BaseIterator & operator+=(const unsigned int increment)=0
static void ConvertFloat32ToBFloat16(const float *srcFloat32Buffer, size_t numElements, void *dstBFloat16Buffer)
void Set(float right) override
QSymm16Decoder(const float scale, const int32_t offset)
Float32Encoder(float *data)
float Get() const override
QASymm8Decoder(const uint8_t *data, const float scale, const int32_t offset)
TypedIterator & operator+=(const unsigned int increment) override
virtual ~Encoder()
float Get() const override
QSymm16Encoder(int16_t *data, const float scale, const int32_t offset)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
PerAxisIterator & operator+=(const unsigned int increment) override
void Set(float right) override
half_float::half Half
Definition: Half.hpp:16
QASymmS8Encoder(int8_t *data, const float scale, const int32_t offset)
BooleanDecoder(const uint8_t *data)
TypedIterator & SetIndex(unsigned int index, unsigned int axisIndex=0) override
TypedIterator & operator-=(const unsigned int increment) override
TypedIterator(T *data=nullptr)
std::vector< float > DecodeTensor(const TensorShape &tensorShape, const unsigned int channelMultiplier, const bool isDepthwise) override
void Set(float right) override
bool Get() const override
float Get() const override