13 #include <boost/assert.hpp> 21 inline std::unique_ptr<Decoder<float>> MakeSigned32PerAxisDecoder(
const TensorInfo& info,
const void* data)
24 return std::make_unique<ScaledInt32PerAxisDecoder>(
25 static_cast<const int32_t*
>(data),
30 inline std::unique_ptr<Decoder<float>> MakeSigned32Decoder(
const TensorInfo& info,
const void* data)
32 if(info.HasMultipleQuantizationScales())
36 return MakeSigned32PerAxisDecoder(info, data);
40 if (info.GetQuantizationDim().has_value())
45 return MakeSigned32PerAxisDecoder(info, data);
48 const float scale = info.GetQuantizationScale();
54 return std::make_unique<Int32Decoder>(
static_cast<const int32_t*
>(data));
60 return std::make_unique<ScaledInt32Decoder>(
static_cast<const int32_t*
>(data), scale);
67 inline std::unique_ptr<Decoder<T>>
MakeDecoder(
const TensorInfo& info,
const void* data =
nullptr);
78 return std::make_unique<QSymm8PerAxisDecoder>(
79 static_cast<const int8_t*
>(data),
86 return std::make_unique<QASymmS8Decoder>(
87 static_cast<const int8_t*
>(data),
93 return std::make_unique<QASymm8Decoder>(
94 static_cast<const uint8_t*
>(data),
100 return std::make_unique<QSymm16Decoder>(
101 static_cast<const int16_t*
>(data),
107 return std::make_unique<Float16Decoder>(
static_cast<const Half*
>(data));
111 return std::make_unique<Float32Decoder>(
static_cast<const float*
>(data));
115 return MakeSigned32Decoder(info, data);
122 return std::make_unique<QSymm8PerAxisDecoder>(
123 static_cast<const int8_t*
>(data),
129 return std::make_unique<QSymmS8Decoder>(
130 static_cast<const int8_t*
>(data),
137 BOOST_ASSERT_MSG(
false,
"Unsupported Data Type!");
// Related declarations referenced above (extraction residue from the
// generated documentation, preserved here as a reference list):
//   bool TensorInfo::HasPerAxisQuantization() const
//   int32_t TensorInfo::GetQuantizationOffset() const
//   DataType TensorInfo::GetDataType() const
//   float TensorInfo::GetQuantizationScale() const
//   std::pair<unsigned int, std::vector<float>> GetPerAxisParams(const armnn::TensorInfo& info)
//   template<typename T>
//   std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr)
//   ARMNN_NO_DEPRECATE_WARN_BEGIN / ARMNN_NO_DEPRECATE_WARN_END
//     — macro pair that suppresses deprecation warnings around a region