CanonicalUtils.cpp (ArmNN 23.11)
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "arm-armnn-sl"

#include "CanonicalUtils.hpp"

#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <armnnUtils/Permute.hpp>

#include <ghc/filesystem.hpp>
namespace fs = ghc::filesystem;
#include <half/half.hpp>
#include <log/log.h>

#include <algorithm>
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iterator>
#include <sstream>
#include <time.h>
#include <variant>

namespace armnn
{
using Half = half_float::half; // import half float implementation
} // namespace armnn

using namespace android;
using namespace android::nn;

namespace armnn_driver
{
const armnn::PermutationVector g_DontPermute{};

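// Swizzles tensor data in 'input' according to the dimension mappings, writing
// the permuted data to 'output' and updating 'tensorInfo' to the swizzled shape.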
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo,
                                     const void* input,
                                     void* output,
                                     const armnn::PermutationVector& mappings)
{
    assert(tensorInfo.GetNumDimensions() == 4U);

    armnn::DataType dataType = tensorInfo.GetDataType();
    switch (dataType)
    {
    case armnn::DataType::Float16:
    case armnn::DataType::Float32:
    case armnn::DataType::QAsymmU8:
    case armnn::DataType::QSymmS8:
    case armnn::DataType::QAsymmS8:
        // First swizzle tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
        // Then swizzle tensor data
        armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
        break;
    default:
        VLOG(DRIVER) << "Unknown armnn::DataType for swizzling";
        assert(0);
    }
}

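// Returns a pointer to a specific location in a pool.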
void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // find the location within the pool
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
    uint8_t* memPoolBuffer = memPool.getBuffer();
    uint8_t* memory = memPoolBuffer + location.offset;
    return memory;
}

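// Returns the start of the memory block referenced by a pointer-based request argument.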
void* GetMemoryFromPointer(const Request::Argument& requestArg)
{
    // get the pointer memory
    auto ptrMemory = std::visit([](std::variant<const void*, void*>&& memory)
                                {
                                    if (std::holds_alternative<const void*>(memory))
                                    {
                                        auto ptr = std::get<const void*>(memory);
                                        auto ptrMemory = static_cast<const uint8_t*>(ptr);
                                        return const_cast<uint8_t*>(ptrMemory);
                                    }
                                    else
                                    {
                                        auto ptr = std::get<void*>(memory);
                                        return static_cast<uint8_t*>(ptr);
                                    }
                                }, requestArg.location.pointer);
    return ptrMemory;
}

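// Converts an Android NN Operand into an equivalent armnn::TensorInfo, including
// scalar/unspecified shapes and per-channel or per-tensor quantization parameters.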
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar = false;

    DataType type;
    switch (operand.type)
    {
        case OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        case OperandType::INT32:
            type = armnn::DataType::Signed32;
            isScalar = true;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = armnn::DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
    }
    else
    {
        if (operand.dimensions.size() == 0)
        {
            TensorShape tensorShape(Dimensionality::NotSpecified);
            ret = TensorInfo(tensorShape, type);
        }
        else
        {
            std::vector<unsigned char> dimensionsSpecificity(operand.dimensions.size(), true);
            int count = 0;
            std::for_each(operand.dimensions.data(),
                          operand.dimensions.data() + operand.dimensions.size(),
                          [&](const unsigned int val)
                          {
                              if (val == 0)
                              {
                                  dimensionsSpecificity[count] = false;
                              }
                              count++;
                          });

            TensorShape tensorShape(operand.dimensions.size(),
                                    operand.dimensions.data(),
                                    reinterpret_cast<const bool*>(dimensionsSpecificity.data()));
            ret = TensorInfo(tensorShape, type);
        }
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        const auto& perAxisQuantParams = std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams);

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}

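// Returns a one-line, human-readable summary of an operand's dimensions and type.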
std::string GetOperandSummary(const Operand& operand)
{
    std::stringstream ss;
    ss << "operand dimensions: [ ";
    for (unsigned int i = 0; i < operand.dimensions.size(); ++i)
    {
        ss << operand.dimensions[i] << " ";
    }
    ss << "] operand type: " << operand.type;
    return ss.str();
}

template <typename TensorType>
using DumpElementFunction = void (*)(const TensorType& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);

namespace
{
template <typename TensorType, typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(elements[elementIndex]) << " ";
}

} // namespace
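
// Writes the elements of 'tensor' to <dumpDir>/<requestName>_<tensorName>.dump,
// preceded by a small header describing the element count and shape.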
template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const TensorType& tensor)
{
    // The dump directory must exist in advance.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");

    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file " << fileName.c_str() << " for writing";
        return;
    }

    DumpElementFunction<TensorType> dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, float>;
            break;
        }
        case armnn::DataType::QAsymmU8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
            break;
        }
        case armnn::DataType::Float16:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
            break;
        }
        case armnn::DataType::QAsymmS8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
            break;
        }
        case armnn::DataType::Boolean:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, bool>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();
        const armnn::TensorShape shape = tensor.GetShape();

        if (!shape.AreAllDimensionsSpecified())
        {
            fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
            return;
        }
        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;

        if (numDimensions == 0)
        {
            fileStream << "# Shape []" << std::endl;
            return;
        }
        fileStream << "# Shape [" << shape[0];
        for (unsigned int d = 1; d < numDimensions; ++d)
        {
            fileStream << "," << shape[d];
        }
        fileStream << "]" << std::endl;
        fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
                      " will be a batch" << std::endl << std::endl;

        // Split will create a new line after all elements of the first dimension
        // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements)
        unsigned int split = 1;
        if (numDimensions == 1)
        {
            split = shape[0];
        }
        else
        {
            for (unsigned int i = 1; i < numDimensions; ++i)
            {
                split *= shape[i];
            }
        }

        // Print all elements in the tensor
        for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
        {
            (*dumpElementFunction)(tensor, elementIndex, fileStream);

            if ((elementIndex + 1) % split == 0)
            {
                fileStream << std::endl;
            }
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
                   << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "An error occurred when writing to file " << fileName.c_str();
    }
}

template void DumpTensor<armnn::ConstTensor>(const std::string& dumpDir,
                                             const std::string& requestName,
                                             const std::string& tensorName,
                                             const armnn::ConstTensor& tensor);

template void DumpTensor<armnn::Tensor>(const std::string& dumpDir,
                                        const std::string& requestName,
                                        const std::string& tensorName,
                                        const armnn::Tensor& tensor);

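// Writes the profiler's collected events in JSON format to
// <dumpDir>/<networkId>_profiling.json when GPU profiling is enabled.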
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler)
{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    if (profiler == nullptr)
    {
        throw armnn::InvalidArgumentException("DumpJsonProfilingIfRequired: pointer to profiler handed in is null");
    }

    // Set the name of the output profiling file.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");

    // Open the output file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file " << fileName.c_str() << " for writing";
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}

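// Exports the optimized network graph to a timestamped .dot file in dumpDir and
// returns the resulting file name (empty if the dump directory is not set).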
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir)
{
    std::string fileName;
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return fileName;
    }

    std::string timestamp = GetFileTimestamp();
    if (timestamp.empty())
    {
        return fileName;
    }

    // Set the name of the output .dot file.
    fs::path dumpPath = dumpDir;
    fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
    fileName = tempFilePath.string();

    VLOG(DRIVER) << "Exporting the optimized network graph to file: " << fileName.c_str();

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file " << fileName.c_str() << " for writing";
        return fileName;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        VLOG(DRIVER) << "An error occurred when writing to file " << fileName.c_str();
    }
    return fileName;
}

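// Serializes the network with armnnSerializer, appending the bytes to dataCacheData
// when data caching is active and, when dumpDir is set, also writing them to a
// timestamped _network.armnn file whose name is returned.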
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive)
{
    std::string fileName;
    bool bSerializeToFile = true;
    if (dumpDir.empty())
    {
        bSerializeToFile = false;
    }
    else
    {
        std::string timestamp = GetFileTimestamp();
        if (timestamp.empty())
        {
            bSerializeToFile = false;
        }
    }
    if (!bSerializeToFile && !dataCachingActive)
    {
        return fileName;
    }

    auto serializer(armnnSerializer::ISerializer::Create());
    // Serialize the Network
    serializer->Serialize(network);
    if (dataCachingActive)
    {
        std::stringstream stream;
        auto serialized = serializer->SaveSerializedToStream(stream);
        if (serialized)
        {
            std::string const serializedString{stream.str()};
            std::copy(serializedString.begin(),
                      serializedString.end(),
                      std::back_inserter(dataCacheData));
        }
    }

    if (bSerializeToFile)
    {
        // Set the name of the output .armnn file.
        fs::path dumpPath = dumpDir;
        std::string timestamp = GetFileTimestamp();
        fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
        fileName = tempFilePath.string();

        // Save serialized network to a file
        std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
        auto serialized = serializer->SaveSerializedToStream(serializedFile);
        if (!serialized)
        {
            VLOG(DRIVER) << "An error occurred when serializing to file " << fileName.c_str();
        }
    }
    return fileName;
}

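// Checks if a tensor info represents a dynamic tensor: the dimensionality is
// unspecified or at least one dimension is not known.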
bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        return true;
    }
    // Account for the usage of the TensorShape empty constructor
    if (tensorInfo.GetNumDimensions() == 0)
    {
        return true;
    }
    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
}

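// Checks for ArmNN support of dynamic tensors.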
bool AreDynamicTensorsSupported()
{
    return true;
}

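// Returns true if the operand type is one of the quantized tensor types.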
bool isQuantizedOperand(const OperandType& operandType)
{
    if (operandType == OperandType::TENSOR_QUANT8_ASYMM ||
        operandType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
        operandType == OperandType::TENSOR_QUANT8_SYMM ||
        operandType == OperandType::TENSOR_QUANT16_SYMM ||
        operandType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
    {
        return true;
    }
    else
    {
        return false;
    }
}

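// Returns a multi-line summary of the model: counts of inputs, operations, outputs
// and operands, followed by summaries of each input, operation type and output.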
std::string GetModelSummary(const Model& model)
{
    std::stringstream result;

    result << model.main.inputIndexes.size() << " input(s), "
           << model.main.operations.size() << " operation(s), "
           << model.main.outputIndexes.size() << " output(s), "
           << model.main.operands.size() << " operand(s) "
           << std::endl;

    result << "Inputs: ";
    for (uint32_t i = 0; i < model.main.inputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.main.operands[model.main.inputIndexes[i]]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (uint32_t i = 0; i < model.main.operations.size(); i++)
    {
        result << model.main.operations[i].type << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t i = 0; i < model.main.outputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.main.operands[model.main.outputIndexes[i]]) << ", ";
    }
    result << std::endl;

    return result.str();
}

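// Returns a "<seconds>_<nanoseconds>" timestamp from CLOCK_MONOTONIC_RAW, or an
// empty string if the clock cannot be read.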
std::string GetFileTimestamp()
{
    // Used to get a timestamp to name diagnostic files (the ArmNN serialized graph
    // and getSupportedOperations.txt files).
    timespec ts;
    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    std::stringstream ss;
    if (iRet == 0)
    {
        ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
    }
    else
    {
        VLOG(DRIVER) << "clock_gettime failed with errno " <<
                        std::to_string(errno).c_str() << " : " <<
                        std::strerror(errno);
    }
    return ss.str();
}

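// Renames the timestamped serialized-network and graph files produced during
// compilation so that both are keyed by the network ID.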
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId)
{
    if (dumpDir.empty())
    {
        return;
    }
    RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
    RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
}

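// Renames an exported file to <dumpDir>/<networkId><extension>, logging any failure.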
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId)
{
    if (existingName.empty() || dumpDir.empty())
    {
        return;
    }

    fs::path dumpPath = dumpDir;
    const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
    int iRet = rename(existingName.c_str(), newFileName.c_str());
    if (iRet != 0)
    {
        std::stringstream ss;
        ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
           << std::to_string(errno) << " : " << std::strerror(errno);
        VLOG(DRIVER) << ss.str().c_str();
    }
}

void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
        pool.flush();
    }
}
} // namespace armnn_driver