Dequantize.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Dequantize.hpp"

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>

namespace armnn
{

void Dequantize(Decoder<float>& inputDecoder,
                Encoder<float>& outputEncoder,
                const TensorInfo& inputInfo,
                const TensorInfo& outputInfo)
{
    boost::ignore_unused(outputInfo);
    BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
    for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
    {
        // inputDecoder.Get() dequantizes the data element from whatever
        // type is given by inputInfo to fp32 (if MakeDecoder supports that dequantization).
        // outputEncoder.Set() transforms the data element to whatever type is
        // given by outputInfo (if MakeEncoder supports that transformation).
        outputEncoder.Set(inputDecoder.Get());
        ++outputEncoder;
        ++inputDecoder;
    }
}

} // namespace armnn
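
For context, a caller normally wraps the raw input and output buffers in a Decoder/Encoder pair before invoking this function. The sketch below is a minimal, hypothetical usage example: the MakeDecoder/MakeEncoder helpers are the ones named in the comments above, but the header names, exact signatures, and the RunDequantize wrapper are assumptions rather than part of this file.

// Hypothetical usage sketch (not part of this file). Assumes the MakeDecoder /
// MakeEncoder helpers referenced in the comments above, e.g. from the
// backendsCommon/workloads Decoders.hpp and Encoders.hpp headers.
#include "Decoders.hpp"
#include "Dequantize.hpp"
#include "Encoders.hpp"

#include <memory>

void RunDequantize(const armnn::TensorInfo& inputInfo,   // quantized type (e.g. asymmetric uint8)
                   const armnn::TensorInfo& outputInfo,  // Float32, same element count
                   const void* inputData,
                   void* outputData)
{
    // The decoder reads quantized elements and yields fp32 values;
    // the encoder writes fp32 values into the output buffer.
    std::unique_ptr<armnn::Decoder<float>> decoder = armnn::MakeDecoder<float>(inputInfo, inputData);
    std::unique_ptr<armnn::Encoder<float>> encoder = armnn::MakeEncoder<float>(outputInfo, outputData);

    armnn::Dequantize(*decoder, *encoder, inputInfo, outputInfo);
}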
Referenced symbols:
- float Dequantize(QuantizedType value, float scale, int32_t offset), defined in TypesUtils.cpp:47
- unsigned int GetNumElements() const, defined in Tensor.hpp:93
- virtual IType Get() const = 0
- virtual void Set(IType right) = 0
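
The element-wise conversion the decoder performs corresponds to the scalar Dequantize helper listed above: standard affine dequantization, real = scale * (quantized - offset). Below is a small self-contained illustration of that arithmetic; it is a sketch of the formula, not the ArmNN implementation, and the DequantizeValue name is hypothetical.

// Illustrative affine dequantization of one asymmetric 8-bit element.
// real = scale * (quantized - offset); the function name is hypothetical.
#include <cstdint>

float DequantizeValue(uint8_t quantized, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(quantized) - offset);
}

// Example: with scale = 0.5f and offset = 128, the stored value 130
// maps back to 0.5f * (130 - 128) = 1.0f.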