ArmNN 23.08
CanonicalUtils.cpp
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "arm-armnn-sl"

#include "CanonicalUtils.hpp"

#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <armnnUtils/Permute.hpp>

#include <ghc/filesystem.hpp>
namespace fs = ghc::filesystem;
#include <half/half.hpp>
#include <log/log.h>

#include <algorithm> // std::for_each, std::copy
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>   // std::strerror
#include <fstream>   // std::ofstream
#include <sstream>
#include <time.h>
#include <variant>
namespace armnn
{
using Half = half_float::half; // import half-float implementation
} // namespace armnn

using namespace android;
using namespace android::nn;
namespace armnn_driver
{

const armnn::PermutationVector g_DontPermute{};

void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo,
                                     const void* input,
                                     void* output,
                                     const armnn::PermutationVector& mappings)
{
    assert(tensorInfo.GetNumDimensions() == 4U);

    armnn::DataType dataType = tensorInfo.GetDataType();
    switch (dataType)
    {
    case armnn::DataType::Float16:
    case armnn::DataType::Float32:
    case armnn::DataType::QAsymmU8:
    case armnn::DataType::QSymmS8:
    case armnn::DataType::QAsymmS8:
        // First swizzle the tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
        // Then swizzle the tensor data
        armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
        break;
    default:
        VLOG(DRIVER) << "Unknown armnn::DataType for swizzling";
        assert(0);
    }
}
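A typical caller uses this to convert Android NN's NHWC layout into ArmNN's NCHW layout. A minimal sketch, assuming an illustrative shape and the usual NHWC-to-NCHW mapping (dst[mappings[i]] = src[i]):

    // Hypothetical usage, not part of this file.
    armnn::TensorInfo info({1, 224, 224, 3}, armnn::DataType::Float32); // NHWC
    std::vector<float> src(info.GetNumElements());
    std::vector<float> dst(src.size());
    const armnn::PermutationVector nhwcToNchw{0, 2, 3, 1};
    armnn_driver::SwizzleAndroidNn4dTensorToArmNn(info, src.data(), dst.data(), nhwcToNchw);
    // info now describes a {1, 3, 224, 224} NCHW tensor; dst holds the permuted data.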
void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // Find the location within the pool.
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
    uint8_t* memPoolBuffer = memPool.getBuffer();
    uint8_t* memory = memPoolBuffer + location.offset;
    return memory;
}

void* GetMemoryFromPointer(const Request::Argument& requestArg)
{
    // Get the pointer memory.
    auto ptrMemory = std::visit([](std::variant<const void*, void*>&& memory)
    {
        if (std::holds_alternative<const void*>(memory))
        {
            auto ptr = std::get<const void*>(memory);
            auto ptrMemory = static_cast<const uint8_t*>(ptr);
            return const_cast<uint8_t*>(ptrMemory);
        }
        else
        {
            auto ptr = std::get<void*>(memory);
            return static_cast<uint8_t*>(ptr);
        }
    }, requestArg.location.pointer);
    return ptrMemory;
}
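Both helpers hand back raw, non-owning pointers. A sketch of how a driver might resolve a request argument, assuming the canonical Request::Argument::LifeTime enum:

    // Hypothetical helper, not part of this file.
    void* ResolveArgument(const Request::Argument& arg,
                          const std::vector<android::nn::RunTimePoolInfo>& pools)
    {
        if (arg.lifetime == Request::Argument::LifeTime::POINTER)
        {
            return armnn_driver::GetMemoryFromPointer(arg);          // memory passed by pointer
        }
        return armnn_driver::GetMemoryFromPool(arg.location, pools); // memory inside a mapped pool
    }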
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar   = false;

    DataType type;
    switch (operand.type)
    {
        case OperandType::TENSOR_BOOL8:
            type = DataType::Boolean;
            break;
        case OperandType::TENSOR_FLOAT32:
            type = DataType::Float32;
            break;
        case OperandType::TENSOR_FLOAT16:
            type = DataType::Float16;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM:
            type = DataType::QAsymmU8;
            break;
        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case OperandType::TENSOR_QUANT8_SYMM:
            type = DataType::QSymmS8;
            break;
        case OperandType::TENSOR_QUANT16_SYMM:
            type = DataType::QSymmS16;
            break;
        case OperandType::TENSOR_INT32:
            type = DataType::Signed32;
            break;
        case OperandType::INT32:
            type = DataType::Signed32;
            isScalar = true;
            break;
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        ret = TensorInfo(TensorShape(Dimensionality::Scalar), type);
    }
    else
    {
        if (operand.dimensions.size() == 0)
        {
            TensorShape tensorShape(Dimensionality::NotSpecified);
            ret = TensorInfo(tensorShape, type);
        }
        else
        {
            std::vector<unsigned char> dimensionsSpecificity(operand.dimensions.size(), true);
            int count = 0;
            std::for_each(operand.dimensions.data(),
                          operand.dimensions.data() + operand.dimensions.size(),
                          [&](const unsigned int val)
                          {
                              if (val == 0)
                              {
                                  dimensionsSpecificity[count] = false;
                              }
                              count++;
                          });

            TensorShape tensorShape(operand.dimensions.size(),
                                    operand.dimensions.data(),
                                    reinterpret_cast<const bool*>(dimensionsSpecificity.data()));
            ret = TensorInfo(tensorShape, type);
        }
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant.
        const auto& perAxisQuantParams = std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams);

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}
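For example, an asymmetric quantized operand maps onto a QAsymmU8 TensorInfo that carries the operand's scale and zero point. A sketch with assumed values:

    // Hypothetical operand, not part of this file.
    Operand operand;
    operand.type       = OperandType::TENSOR_QUANT8_ASYMM;
    operand.dimensions = {1, 16};
    operand.scale      = 0.05f;
    operand.zeroPoint  = 128;

    armnn::TensorInfo info = GetTensorInfoForOperand(operand);
    // info: shape [1,16], DataType::QAsymmU8, quantization scale 0.05, offset 128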
std::string GetOperandSummary(const Operand& operand)
{
    std::stringstream ss;
    ss << "operand dimensions: [ ";
    for (unsigned int i = 0; i < operand.dimensions.size(); ++i)
    {
        ss << operand.dimensions[i] << " ";
    }
    ss << "] operand type: " << operand.type;
    return ss.str();
}
template <typename TensorType>
using DumpElementFunction = void (*)(const TensorType& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);

namespace
{
template <typename TensorType, typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(elements[elementIndex]) << " ";
}
} // namespace
template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const TensorType& tensor)
{
    // The dump directory must exist in advance.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");

    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
        return;
    }

    DumpElementFunction<TensorType> dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, float>;
            break;
        }
        case armnn::DataType::QAsymmU8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
            break;
        }
        case armnn::DataType::Float16:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
            break;
        }
        case armnn::DataType::QAsymmS8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
            break;
        }
        case armnn::DataType::Boolean:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, bool>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();
        const armnn::TensorShape shape = tensor.GetShape();

        if (!shape.AreAllDimensionsSpecified())
        {
            fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
            return;
        }
        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;

        if (numDimensions == 0)
        {
            fileStream << "# Shape []" << std::endl;
            return;
        }
        fileStream << "# Shape [" << shape[0];
        for (unsigned int d = 1; d < numDimensions; ++d)
        {
            fileStream << "," << shape[d];
        }
        fileStream << "]" << std::endl;
        fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
                      " will be a batch" << std::endl << std::endl;

        // Split will create a new line after all elements of the first dimension
        // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements).
        unsigned int split = 1;
        if (numDimensions == 1)
        {
            split = shape[0];
        }
        else
        {
            for (unsigned int i = 1; i < numDimensions; ++i)
            {
                split *= shape[i];
            }
        }

        // Print all elements in the tensor.
        for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
        {
            (*dumpElementFunction)(tensor, elementIndex, fileStream);

            if ((elementIndex + 1) % split == 0)
            {
                fileStream << std::endl;
            }
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
                   << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
    }
}
template void DumpTensor<armnn::ConstTensor>(const std::string& dumpDir,
                                             const std::string& requestName,
                                             const std::string& tensorName,
                                             const armnn::ConstTensor& tensor);

template void DumpTensor<armnn::Tensor>(const std::string& dumpDir,
                                        const std::string& requestName,
                                        const std::string& tensorName,
                                        const armnn::Tensor& tensor);
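DumpTensor is only instantiated for armnn::ConstTensor and armnn::Tensor. A usage sketch with assumed paths (the dump directory must already exist):

    // Hypothetical usage, not part of this file.
    armnn::TensorInfo info({2, 2}, armnn::DataType::Float32);
    info.SetConstant(true); // ConstTensor requires a constant TensorInfo
    const std::vector<float> values{1.0f, 2.0f, 3.0f, 4.0f};
    armnn::ConstTensor tensor(info, values.data());
    armnn_driver::DumpTensor("/data/armnn-dumps", "request0", "output0", tensor);
    // Writes /data/armnn-dumps/request0_output0.dump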
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler)
{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    ARMNN_ASSERT(profiler);

    // Set the name of the output profiling file.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");

    // Open the output file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}
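A sketch of the call site once a network is loaded and profiling is enabled (runtime and network ID assumed):

    // Hypothetical usage, not part of this file.
    const armnn::IProfiler* profiler = runtime->GetProfiler(netId).get();
    armnn_driver::DumpJsonProfilingIfRequired(/*gpuProfilingEnabled=*/true,
                                              "/data/armnn-dumps", netId, profiler);
    // Writes /data/armnn-dumps/<netId>_profiling.json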
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir)
{
    std::string fileName;
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return fileName;
    }

    std::string timestamp = GetFileTimestamp();
    if (timestamp.empty())
    {
        return fileName;
    }

    // Set the name of the output .dot file.
    fs::path dumpPath = dumpDir;
    fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
    fileName = tempFilePath.string();

    VLOG(DRIVER) << "Exporting the optimized network graph to file: %s" << fileName.c_str();

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
        return fileName;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
    }
    return fileName;
}
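A sketch of how the driver might call this after optimizing a network (network, backends and runtime assumed to exist):

    // Hypothetical usage, not part of this file.
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network, backends, runtime->GetDeviceSpec());
    std::string dotFile = armnn_driver::ExportNetworkGraphToDotFile(*optNet, "/data/armnn-dumps");
    // Returns "" when dumpDir is empty or no timestamp could be generated.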
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive)
{
    std::string fileName;
    bool bSerializeToFile = true;
    if (dumpDir.empty())
    {
        bSerializeToFile = false;
    }
    else
    {
        std::string timestamp = GetFileTimestamp();
        if (timestamp.empty())
        {
            bSerializeToFile = false;
        }
    }
    if (!bSerializeToFile && !dataCachingActive)
    {
        return fileName;
    }

    auto serializer(armnnSerializer::ISerializer::Create());
    // Serialize the Network
    serializer->Serialize(network);
    if (dataCachingActive)
    {
        std::stringstream stream;
        auto serialized = serializer->SaveSerializedToStream(stream);
        if (serialized)
        {
            std::string const serializedString{stream.str()};
            std::copy(serializedString.begin(),
                      serializedString.end(),
                      std::back_inserter(dataCacheData));
        }
    }

    if (bSerializeToFile)
    {
        // Set the name of the output .armnn file.
        fs::path dumpPath = dumpDir;
        std::string timestamp = GetFileTimestamp();
        fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
        fileName = tempFilePath.string();

        // Save the serialized network to a file.
        std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
        auto serialized = serializer->SaveSerializedToStream(serializedFile);
        if (!serialized)
        {
            VLOG(DRIVER) << "An error occurred when serializing to file %s" << fileName.c_str();
        }
    }
    return fileName;
}
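A sketch of serializing purely for the NNAPI data cache, with no on-disk dump (the network pointer is assumed):

    // Hypothetical usage, not part of this file.
    std::vector<uint8_t> cacheBlob;
    std::string file = armnn_driver::SerializeNetwork(*network, /*dumpDir=*/"", cacheBlob,
                                                      /*dataCachingActive=*/true);
    // file is empty because dumpDir was empty; cacheBlob now holds the serialized network.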
bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        return true;
    }
    // Account for the usage of the TensorShape empty constructor.
    if (tensorInfo.GetNumDimensions() == 0)
    {
        return true;
    }
    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
}

bool AreDynamicTensorsSupported()
{
    return true;
}

bool isQuantizedOperand(const OperandType& operandType)
{
    if (operandType == OperandType::TENSOR_QUANT8_ASYMM ||
        operandType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
        operandType == OperandType::TENSOR_QUANT8_SYMM ||
        operandType == OperandType::TENSOR_QUANT16_SYMM ||
        operandType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
    {
        return true;
    }
    else
    {
        return false;
    }
}
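Conversion code typically combines these checks before accepting an operand. A sketch of the usual pattern, with the operand assumed:

    // Hypothetical check, not part of this file.
    bool IsOutputSupported(const Operand& outputOperand)
    {
        armnn::TensorInfo outputInfo = GetTensorInfoForOperand(outputOperand);
        // Reject outputs whose shape is unknown at preparation time.
        return !(IsDynamicTensor(outputInfo) && !AreDynamicTensorsSupported());
    }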
std::string GetModelSummary(const Model& model)
{
    std::stringstream result;

    result << model.main.inputIndexes.size() << " input(s), "
           << model.main.operations.size() << " operation(s), "
           << model.main.outputIndexes.size() << " output(s), "
           << model.main.operands.size() << " operand(s) "
           << std::endl;

    result << "Inputs: ";
    for (uint32_t i = 0; i < model.main.inputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.main.operands[model.main.inputIndexes[i]]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (uint32_t i = 0; i < model.main.operations.size(); i++)
    {
        result << model.main.operations[i].type << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t i = 0; i < model.main.outputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.main.operands[model.main.outputIndexes[i]]) << ", ";
    }
    result << std::endl;

    return result.str();
}
std::string GetFileTimestamp()
{
    // Used to get a timestamp to name diagnostic files (the ArmNN serialized graph
    // and getSupportedOperations.txt files).
    timespec ts;
    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    std::stringstream ss;
    if (iRet == 0)
    {
        ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
    }
    else
    {
        VLOG(DRIVER) << "clock_gettime failed with errno " <<
                        std::to_string(errno).c_str() << " : " <<
                        std::strerror(errno);
    }
    return ss.str();
}
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId)
{
    if (dumpDir.empty())
    {
        return;
    }
    RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
    RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
}

void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId)
{
    if (existingName.empty() || dumpDir.empty())
    {
        return;
    }

    fs::path dumpPath = dumpDir;
    const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
    int iRet = rename(existingName.c_str(), newFileName.c_str());
    if (iRet != 0)
    {
        std::stringstream ss;
        ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
           << std::to_string(errno) << " : " << std::strerror(errno);
        VLOG(DRIVER) << ss.str().c_str();
    }
}
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R,
        // where update() has been removed and flush() added.
        pool.flush();
    }
}

} // namespace armnn_driver
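CommitPools is the final step of request execution. A sketch, assuming the pools were mapped earlier with RunTimePoolInfo:

    // Hypothetical end-of-execution step, not part of this file.
    void FinishExecution(std::vector<android::nn::RunTimePoolInfo>& memPools)
    {
        // Make outputs written through pool-backed buffers visible to the client.
        armnn_driver::CommitPools(memPools);
    }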