12 #include <fmt/format.h>

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

14 using namespace armnn;
20 unsigned int numberOfChannels,
28 return TensorShape({numberOfBatches, numberOfChannels, height, width});
30 return TensorShape({numberOfBatches, height, width, numberOfChannels});
33 + std::to_string(
static_cast<int>(dataLayout)) +
39 unsigned int numberOfChannels,
48 return TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
50 return TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
53 + std::to_string(
static_cast<int>(dataLayout)) +
59 unsigned int numberOfChannels,
69 return TensorInfo({numberOfBatches, depth, height, width, numberOfChannels}, dataType);
71 return TensorInfo({numberOfBatches, numberOfChannels, depth, height, width}, dataType);
74 + std::to_string(
static_cast<int>(dataLayout)) +
81 auto tensor_data =
static_cast<const float *
>(tensorHandle->
Map(
true));
85 float min = tensor_data[0];
86 float max = tensor_data[0];
89 for (
unsigned int val = 1; val < tensor_size; val++)
91 if (tensor_data[val] < min)
93 min = tensor_data[val];
95 else if (tensor_data[val] > max)
97 max = tensor_data[val];
101 tensorHandle->
Unmap();
103 return std::make_pair(min, max);
112 std::vector<unsigned int> newShape;
115 unsigned int dimsSkipped = 0;
116 bool insertRemainder =
false;
120 if (tensorShape[i] == 1 && dimsSkipped < dimsToSkip && !insertRemainder)
125 newShape.push_back(tensorShape[i]);
127 insertRemainder =
true;
129 return TensorShape(
static_cast<unsigned int>(newShape.size()), newShape.data());
136 strippedTensor.
SetShape(strippedShape);
137 return strippedTensor;
144 if (axis < -armnn::numeric_cast<int>(outputDim) || axis > armnn::numeric_cast<int>(tensorShape.
GetNumDimensions()))
154 axis = armnn::numeric_cast<int>(outputDim) + axis;
157 std::vector<unsigned int> outputShape;
161 outputShape.push_back(tensorShape[i]);
163 outputShape.insert(outputShape.begin() + axis, 1);
165 return { outputDim, outputShape.data() };
176 std::vector<unsigned int> newShape;
181 newShape.push_back(1);
187 newShape.push_back(tensorShape[i]);
190 return TensorShape(
static_cast<unsigned int>(newShape.size()), newShape.data());
195 std::vector<unsigned int> squeezedDims;
199 if (tensorShape[i] != 1)
201 squeezedDims.push_back(tensorShape[i]);
208 const unsigned int firstAxisInclusive,
209 const unsigned int lastAxisExclusive)
213 unsigned int count = 1;
214 for (
unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++)
224 "Required axis index greater than number of dimensions.");
226 "Required axis index lower than negative of the number of dimensions");
228 unsigned int uAxis = axis < 0 ?
229 inputDimension - armnn::numeric_cast<unsigned int>(
abs(axis))
230 : armnn::numeric_cast<unsigned int>(axis);
238 unsigned int count = 1;
239 for (
unsigned int i = axis+1; i < numDim; i++)
248 const std::vector<float>& scales =
info.GetQuantizationScales();
250 if (!
info.HasPerAxisQuantization())
253 std::string(
"Per-axis quantization params not set for tensor of type ") +
258 return { axisFactor, scales };
261 template<
typename PrimitiveType>
267 fmt::format(
"The data does not contain the expected number of elements {} != {}. {}",
272 template<
typename PrimitiveType>
277 std::unique_ptr<float[]> returnBuffer(
new float[tensorInfo.
GetNumElements()]);
282 auto axisDimensionality = tensorInfo.
GetShape()[axis];
287 unsigned int axisIndex;
295 axisIndex = (i / axisFactor) % axisDimensionality;
297 returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
306 returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
320 ::memcpy(buffer.data(), data.data(), data.size());
321 return ToFloatArray<int8_t>(buffer, tensorInfo);
326 return ToFloatArray<uint8_t>(data, tensorInfo);
332 ::memcpy(buffer.data(), data.data(), data.size());
333 return ToFloatArray<int32_t>(buffer, tensorInfo);
339 ::memcpy(buffer.data(), data.data(), data.size());
340 return ToFloatArray<int64_t>(buffer, tensorInfo);
343 fmt::format(
"Unsupported datatype {}. {}",