path: root/Utils.cpp
Diffstat (limited to 'Utils.cpp')
-rw-r--r--  Utils.cpp  379
1 file changed, 253 insertions(+), 126 deletions(-)
diff --git a/Utils.cpp b/Utils.cpp
index 930c2b24..58356ac1 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,14 +9,12 @@
#include "Half.hpp"
#include <armnnSerializer/ISerializer.hpp>
+#include <armnnUtils/Filesystem.hpp>
#include <armnnUtils/Permute.hpp>
#include <armnn/Utils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <Filesystem.hpp>
#include <log/log.h>
-#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <sstream>
@@ -31,44 +29,39 @@ namespace armnn_driver
{
const armnn::PermutationVector g_DontPermute{};
-namespace
-{
-
-void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
- void* output, size_t dataTypeSize, const armnn::PermutationVector& mappings)
-{
- assert(inTensorShape.GetNumDimensions() == 4U);
-
- armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, input, output, dataTypeSize);
-}
-
-} // anonymous namespace
-
-void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
+void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo, const void* input, void* output,
const armnn::PermutationVector& mappings)
{
- assert(tensor.GetNumDimensions() == 4U);
-
- armnn::DataType dataType = tensor.GetDataType();
+ if (tensorInfo.GetNumDimensions() != 4U)
+ {
+ throw armnn::InvalidArgumentException("NumDimensions must be 4");
+ }
+ armnn::DataType dataType = tensorInfo.GetDataType();
switch (dataType)
{
case armnn::DataType::Float16:
case armnn::DataType::Float32:
case armnn::DataType::QAsymmU8:
+ case armnn::DataType::QSymmS16:
case armnn::DataType::QSymmS8:
case armnn::DataType::QAsymmS8:
- SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
+ // First swizzle tensor info
+ tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
+ // Then swizzle tensor data
+ armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
break;
default:
- ALOGW("Unknown armnn::DataType for swizzling");
- assert(0);
+ throw armnn::InvalidArgumentException("Unknown DataType for swizzling");
}
}
void* GetMemoryFromPool(V1_0::DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
// find the location within the pool
- assert(location.poolIndex < memPools.size());
+ if (location.poolIndex >= memPools.size())
+ {
+ throw armnn::InvalidArgumentException("The poolIndex is greater than the memPools size.");
+ }
const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
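[Editor's note] A minimal usage sketch of the reworked swizzle (not part of this patch): the function now takes the TensorInfo by non-const reference, permutes the shape in place, and throws instead of asserting. The function name SwizzleExample, the {1,2,3,4} shape and the NHWC-to-NCHW mapping below are illustrative assumptions; the armnn types are the ones included above.

    // Sketch only: assumes the driver's Utils.hpp declares SwizzleAndroidNn4dTensorToArmNn.
    #include "Utils.hpp"
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <vector>

    void SwizzleExample()
    {
        // NHWC-shaped 4D tensor; anything other than 4 dimensions now throws
        // armnn::InvalidArgumentException instead of tripping an assert.
        armnn::TensorInfo info(armnn::TensorShape({1, 2, 3, 4}), armnn::DataType::Float32);
        std::vector<float> in(info.GetNumElements());
        std::vector<float> out(info.GetNumElements());

        // Destination-index mapping for NHWC -> NCHW.
        const armnn::PermutationVector toNCHW{0U, 2U, 3U, 1U};

        // One call permutes both the TensorInfo (in place) and the data:
        // info.GetShape() becomes {1, 4, 2, 3} and out holds the permuted elements.
        armnn_driver::SwizzleAndroidNn4dTensorToArmNn(info, in.data(), out.data(), toNCHW);
    }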
@@ -107,20 +100,19 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
}
else
{
- bool dimensionsSpecificity[5] = { true, true, true, true, true };
- int count = 0;
- std::for_each(operand.dimensions.data(),
- operand.dimensions.data() + operand.dimensions.size(),
- [&](const unsigned int val)
- {
- if (val == 0)
- {
- dimensionsSpecificity[count] = false;
- }
- count++;
- });
+ std::vector<unsigned char> dimensionsSpecificity(operand.dimensions.size(), true);
- TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
+ for (unsigned int i = 0; i < static_cast<unsigned int>(operand.dimensions.size()); ++i)
+ {
+ auto dim = operand.dimensions[i];
+ if (dim == 0)
+ {
+ dimensionsSpecificity[i] = false;
+ }
+ }
+ TensorShape tensorShape(operand.dimensions.size(),
+ operand.dimensions.data(),
+ reinterpret_cast<const bool *>(dimensionsSpecificity.data()));
ret = TensorInfo(tensorShape, type);
}
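[Editor's note] For reference, a standalone sketch (mine, not from the patch) of the dimension-specificity handling: an NNAPI dimension of 0 means "unknown", and the flag vector marks it as unspecified so AreAllDimensionsSpecified() later returns false. The helper name ShapeFromNnapiDims and the example dimensions are assumptions.

    // Sketch only: mirrors the loop above using the same TensorShape constructor.
    #include <armnn/Tensor.hpp>
    #include <vector>

    armnn::TensorShape ShapeFromNnapiDims(const std::vector<unsigned int>& dims)
    {
        std::vector<unsigned char> specificity(dims.size(), 1);
        for (size_t i = 0; i < dims.size(); ++i)
        {
            if (dims[i] == 0)
            {
                specificity[i] = 0; // dimension is dynamic / not yet known
            }
        }
        return armnn::TensorShape(static_cast<unsigned int>(dims.size()),
                                  dims.data(),
                                  reinterpret_cast<const bool*>(specificity.data()));
    }

    // e.g. {0, 224, 224, 3} yields a shape whose first dimension is unspecified,
    // so shape.AreAllDimensionsSpecified() returns false.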
@@ -195,9 +187,10 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
if (perChannel)
{
- // ExtraParams is expected to be of type channelQuant
- ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
+ if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
+ {
+ throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
+ }
auto perAxisQuantParams = operand.extraParams.channelQuant();
@@ -296,9 +289,10 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
if (perChannel)
{
// ExtraParams is expected to be of type channelQuant
- ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
-
+ if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
+ {
+ throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
+ }
auto perAxisQuantParams = operand.extraParams.channelQuant();
ret.SetQuantizationScales(perAxisQuantParams.scales);
@@ -340,39 +334,27 @@ std::string GetOperandSummary(const V1_3::Operand& operand)
#endif
-using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
+template <typename TensorType>
+using DumpElementFunction = void (*)(const TensorType& tensor,
unsigned int elementIndex,
std::ofstream& fileStream);
namespace
{
-template <typename ElementType, typename PrintableType = ElementType>
-void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
+template <typename TensorType, typename ElementType, typename PrintableType = ElementType>
+void DumpTensorElement(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
- fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
+ fileStream << static_cast<PrintableType>(elements[elementIndex]) << " ";
}
-constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
-{
- const char* str = "";
-
- switch (tensor.GetNumDimensions())
- {
- case 4: { str = "(BHWC) "; break; }
- case 3: { str = "(HWC) "; break; }
- case 2: { str = "(HW) "; break; }
- default: { str = ""; break; }
- }
-
- return str;
-}
} // namespace
+template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
const std::string& requestName,
const std::string& tensorName,
- const armnn::ConstTensor& tensor)
+ const TensorType& tensor)
{
// The dump directory must exist in advance.
fs::path dumpPath = dumpDir;
@@ -387,38 +369,38 @@ void DumpTensor(const std::string& dumpDir,
return;
}
- DumpElementFunction dumpElementFunction = nullptr;
+ DumpElementFunction<TensorType> dumpElementFunction = nullptr;
switch (tensor.GetDataType())
{
case armnn::DataType::Float32:
{
- dumpElementFunction = &DumpTensorElement<float>;
+ dumpElementFunction = &DumpTensorElement<TensorType, float>;
break;
}
case armnn::DataType::QAsymmU8:
{
- dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
+ dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
break;
}
case armnn::DataType::Signed32:
{
- dumpElementFunction = &DumpTensorElement<int32_t>;
+ dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
break;
}
case armnn::DataType::Float16:
{
- dumpElementFunction = &DumpTensorElement<armnn::Half>;
+ dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
break;
}
case armnn::DataType::QAsymmS8:
{
- dumpElementFunction = &DumpTensorElement<int8_t, int32_t>;
+ dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
break;
}
case armnn::DataType::Boolean:
{
- dumpElementFunction = &DumpTensorElement<bool>;
+ dumpElementFunction = &DumpTensorElement<TensorType, bool>;
break;
}
default:
@@ -430,55 +412,53 @@ void DumpTensor(const std::string& dumpDir,
if (dumpElementFunction != nullptr)
{
const unsigned int numDimensions = tensor.GetNumDimensions();
+ const armnn::TensorShape shape = tensor.GetShape();
- const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;
-
- const unsigned int height = (numDimensions >= 3)
- ? tensor.GetShape()[numDimensions - 3]
- : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;
-
- const unsigned int width = (numDimensions >= 3)
- ? tensor.GetShape()[numDimensions - 2]
- : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;
-
- const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;
-
+ if (!shape.AreAllDimensionsSpecified())
+ {
+ fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
+ return;
+ }
fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
- fileStream << "# Dimensions " << MemoryLayoutString(tensor);
- fileStream << "[" << tensor.GetShape()[0];
- for (unsigned int d = 1; d < numDimensions; d++)
+
+ if (numDimensions == 0)
{
- fileStream << "," << tensor.GetShape()[d];
+ fileStream << "# Shape []" << std::endl;
+ return;
+ }
+ fileStream << "# Shape [" << shape[0];
+ for (unsigned int d = 1; d < numDimensions; ++d)
+ {
+ fileStream << "," << shape[d];
}
fileStream << "]" << std::endl;
+ fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
+ " will be a batch" << std::endl << std::endl;
- for (unsigned int e = 0, b = 0; b < batch; ++b)
+ // Split will create a new line after all elements of the first dimension
+ // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements)
+ unsigned int split = 1;
+ if (numDimensions == 1)
{
- if (numDimensions >= 4)
+ split = shape[0];
+ }
+ else
+ {
+ for (unsigned int i = 1; i < numDimensions; ++i)
{
- fileStream << "# Batch " << b << std::endl;
+ split *= shape[i];
}
- for (unsigned int c = 0; c < channels; c++)
+ }
+
+ // Print all elements in the tensor
+ for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
+ {
+ (*dumpElementFunction)(tensor, elementIndex, fileStream);
+
+ if ( (elementIndex + 1) % split == 0 )
{
- if (numDimensions >= 3)
- {
- fileStream << "# Channel " << c << std::endl;
- }
- for (unsigned int h = 0; h < height; h++)
- {
- for (unsigned int w = 0; w < width; w++, e += channels)
- {
- (*dumpElementFunction)(tensor, e, fileStream);
- }
- fileStream << std::endl;
- }
- e -= channels - 1;
- if (c < channels)
- {
- e -= ((height * width) - 1) * channels;
- }
+ fileStream << std::endl;
}
- fileStream << std::endl;
}
fileStream << std::endl;
}
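[Editor's note] As a worked check of the new row-splitting logic, in a standalone sketch rather than driver code: for a [4, 3, 2, 3] tensor, split = 3 * 2 * 3 = 18, so the 72 elements are written as 4 lines of 18 values. The helper name PrintRows and the use of printf are assumptions for illustration.

    // Sketch only: the same split computation on a plain shape vector.
    #include <cstdio>
    #include <vector>

    void PrintRows(const std::vector<unsigned int>& shape, const std::vector<float>& data)
    {
        // One output line per index of the first dimension (one line total for 1D).
        unsigned int split = 1;
        if (shape.size() == 1)
        {
            split = shape[0];
        }
        else
        {
            for (size_t i = 1; i < shape.size(); ++i)
            {
                split *= shape[i];
            }
        }

        for (size_t e = 0; e < data.size(); ++e)
        {
            std::printf("%g ", data[e]);
            if ((e + 1) % split == 0)
            {
                std::printf("\n");
            }
        }
    }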
@@ -494,6 +474,17 @@ void DumpTensor(const std::string& dumpDir,
}
}
+
+template void DumpTensor<armnn::ConstTensor>(const std::string& dumpDir,
+ const std::string& requestName,
+ const std::string& tensorName,
+ const armnn::ConstTensor& tensor);
+
+template void DumpTensor<armnn::Tensor>(const std::string& dumpDir,
+ const std::string& requestName,
+ const std::string& tensorName,
+ const armnn::Tensor& tensor);
+
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
const std::string& dumpDir,
armnn::NetworkId networkId,
@@ -511,7 +502,11 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
return;
}
- ARMNN_ASSERT(profiler);
+ if (!profiler)
+ {
+ ALOGW("profiler was null");
+ return;
+ }
// Set the name of the output profiling file.
fs::path dumpPath = dumpDir;
@@ -571,37 +566,59 @@ std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimize
return fileName;
}
-std::string SerializeNetwork(const armnn::INetwork& network, const std::string& dumpDir)
+std::string SerializeNetwork(const armnn::INetwork& network,
+ const std::string& dumpDir,
+ std::vector<uint8_t>& dataCacheData,
+ bool dataCachingActive)
{
std::string fileName;
- // The dump directory must exist in advance.
+ bool bSerializeToFile = true;
if (dumpDir.empty())
{
- return fileName;
+ bSerializeToFile = false;
}
-
- std::string timestamp = GetFileTimestamp();
- if (timestamp.empty())
+ else
+ {
+ std::string timestamp = GetFileTimestamp();
+ if (timestamp.empty())
+ {
+ bSerializeToFile = false;
+ }
+ }
+ if (!bSerializeToFile && !dataCachingActive)
{
return fileName;
}
auto serializer(armnnSerializer::ISerializer::Create());
-
// Serialize the Network
serializer->Serialize(network);
+ if (dataCachingActive)
+ {
+ std::stringstream stream;
+ auto serialized = serializer->SaveSerializedToStream(stream);
+ if (serialized)
+ {
+ std::string const serializedString{stream.str()};
+ std::copy(serializedString.begin(), serializedString.end(), std::back_inserter(dataCacheData));
+ }
+ }
- // Set the name of the output .armnn file.
- fs::path dumpPath = dumpDir;
- fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
- fileName = tempFilePath.string();
-
- // Save serialized network to a file
- std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
- bool serialized = serializer->SaveSerializedToStream(serializedFile);
- if (!serialized)
+ if (bSerializeToFile)
{
- ALOGW("An error occurred when serializing to file %s", fileName.c_str());
+ // Set the name of the output .armnn file.
+ fs::path dumpPath = dumpDir;
+ std::string timestamp = GetFileTimestamp();
+ fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
+ fileName = tempFilePath.string();
+
+ // Save serialized network to a file
+ std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
+ auto serialized = serializer->SaveSerializedToStream(serializedFile);
+ if (!serialized)
+ {
+ ALOGW("An error occurred when serializing to file %s", fileName.c_str());
+ }
}
return fileName;
}
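[Editor's note] A hedged usage sketch of the extended SerializeNetwork (the function name SerializeExample and the dump directory below are illustrative assumptions): the serialized blob can now be appended to a caller-supplied cache vector, written to a timestamped .armnn file, or both; with neither a dump directory nor caching requested the call returns an empty file name.

    // Sketch only: assumes the driver's Utils.hpp declares the 4-argument overload.
    #include "Utils.hpp"
    #include <armnn/INetwork.hpp>
    #include <cstdint>
    #include <string>
    #include <vector>

    void SerializeExample(const armnn::INetwork& network)
    {
        // Cache-only: no dump directory, blob appended to dataCacheData.
        std::vector<uint8_t> dataCacheData;
        armnn_driver::SerializeNetwork(network, "", dataCacheData, true);

        // File-only: writes <dumpDir>/<timestamp>_network.armnn and returns its path.
        std::vector<uint8_t> unused;
        std::string fileName =
            armnn_driver::SerializeNetwork(network, "/data/vendor/arm", unused, false);
    }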
@@ -629,6 +646,53 @@ bool AreDynamicTensorsSupported()
#endif
}
+bool isQuantizedOperand(const V1_0::OperandType& operandType)
+{
+ if (operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)// Using ::android::hardware::neuralnetworks::V1_2
+bool isQuantizedOperand(const V1_2::OperandType& operandType)
+{
+ if (operandType == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
+ operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+ operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
+ operandType == V1_2::OperandType::TENSOR_QUANT16_SYMM )
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+#endif
+
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+bool isQuantizedOperand(const V1_3::OperandType& operandType)
+{
+ if (operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
+ operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+ operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
+ operandType == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
+ operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+#endif
+
std::string GetFileTimestamp()
{
// used to get a timestamp to name diagnostic files (the ArmNN serialized graph
@@ -702,4 +766,67 @@ void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
#endif
}
}
+
+size_t GetSize(const V1_0::Request& request, const V1_0::RequestArgument& requestArgument)
+{
+ return request.pools[requestArgument.location.poolIndex].size();
+}
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+size_t GetSize(const V1_3::Request& request, const V1_0::RequestArgument& requestArgument)
+{
+ if (request.pools[requestArgument.location.poolIndex].getDiscriminator() ==
+ V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory)
+ {
+ return request.pools[requestArgument.location.poolIndex].hidlMemory().size();
+ }
+ else
+ {
+ return 0;
+ }
+}
+#endif
+
+template <typename ErrorStatus, typename Request>
+ErrorStatus ValidateRequestArgument(const Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString)
+{
+ if (requestArgument.location.poolIndex >= request.pools.size())
+ {
+ std::string err = fmt::format("Invalid {} pool at index {} the pool index is greater than the number "
+ "of available pools {}",
+ descString, requestArgument.location.poolIndex, request.pools.size());
+ ALOGE(err.c_str());
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ const size_t size = GetSize(request, requestArgument);
+ size_t totalLength = tensorInfo.GetNumBytes();
+
+ if (static_cast<size_t>(requestArgument.location.offset) + totalLength > size)
+ {
+ std::string err = fmt::format("Invalid {} pool at index {} the offset {} and length {} are greater "
+ "than the pool size {}", descString, requestArgument.location.poolIndex,
+ requestArgument.location.offset, totalLength, size);
+ ALOGE(err.c_str());
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return ErrorStatus::NONE;
+}
+
+template V1_0::ErrorStatus ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(
+ const V1_0::Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString);
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+template V1_3::ErrorStatus ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(
+ const V1_3::Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString);
+#endif
+
} // namespace armnn_driver
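[Editor's note] Finally, a small standalone sketch of the two checks ValidateRequestArgument performs, with plain integers in place of the HIDL types (the helper name IsRequestArgumentValid and the example numbers are assumptions): the argument is rejected if its pool index is out of range or if offset + tensorInfo.GetNumBytes() does not fit inside the pool.

    // Sketch only: the same bounds checks on plain values.
    #include <cstddef>
    #include <vector>

    bool IsRequestArgumentValid(const std::vector<size_t>& poolSizes,
                                size_t poolIndex,
                                size_t offset,
                                size_t tensorNumBytes)
    {
        // 1. The referenced pool must exist.
        if (poolIndex >= poolSizes.size())
        {
            return false;
        }
        // 2. The tensor's bytes, starting at the given offset, must fit in the pool.
        return offset + tensorNumBytes <= poolSizes[poolIndex];
    }

    // e.g. poolSizes {4096}, poolIndex 0, offset 4000, tensorNumBytes 128
    // -> invalid, because 4000 + 128 = 4128 > 4096.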