diff options
author | Mike Kelly <mike.kelly@arm.com> | 2019-06-11 16:35:25 +0100 |
---|---|---|
committer | Mike Kelly <mike.kelly@arm.com> | 2019-06-11 16:35:25 +0100 |
commit | b5fdf38f0c6596958fab2b84882f2792a31e585a (patch) | |
tree | d6b578b51c1923c759653d8a04efa90923ad4dd8 /Utils.cpp | |
parent | b92f8901fc34749337ea7a9ad7a2717fc9490de5 (diff) | |
download | android-nn-driver-b5fdf38f0c6596958fab2b84882f2792a31e585a.tar.gz |
IVGCVSW-3181 Add HAL 1.2 support to android-nn-driver
* Updated Android.mk to build HAL 1.2 driver
* Added 1.2 HalPolicy and ArmnnDriver
* Added 1.2 ArmnnPreparedModel
* Updated converters and utilities to accept new HAL 1.2 operands and operand types.
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I62856deab24e106f72cccce09468db4971756fa6
Diffstat (limited to 'Utils.cpp')
-rw-r--r-- | Utils.cpp | 48 |
1 file changed, 46 insertions(+), 2 deletions(-)
@@ -63,7 +63,7 @@ void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::Ru // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where // "buffer" has been made private and must be accessed via the accessor method "getBuffer". -#if defined(ARMNN_ANDROID_P) // Use the new Android P implementation. +#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) // Use the new Android implementation. uint8_t* memPoolBuffer = memPool.getBuffer(); #else // Fallback to the old Android O implementation. uint8_t* memPoolBuffer = memPool.buffer; @@ -90,7 +90,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand) type = armnn::DataType::Signed32; break; default: - throw UnsupportedOperand(operand.type); + throw UnsupportedOperand<V1_0::OperandType>(operand.type); } armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type); @@ -101,12 +101,56 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand) return ret; } +#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2 + +armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand) +{ + armnn::DataType type; + + switch (operand.type) + { + case V1_2::OperandType::TENSOR_FLOAT32: + type = armnn::DataType::Float32; + break; + case V1_2::OperandType::TENSOR_QUANT8_ASYMM: + type = armnn::DataType::QuantisedAsymm8; + break; + case V1_2::OperandType::TENSOR_QUANT16_SYMM: + type = armnn::DataType::QuantisedSymm16; + break; + case V1_2::OperandType::TENSOR_INT32: + type = armnn::DataType::Signed32; + break; + default: + throw UnsupportedOperand<V1_2::OperandType>(operand.type); + } + + armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type); + + ret.SetQuantizationScale(operand.scale); + ret.SetQuantizationOffset(operand.zeroPoint); + + return ret; +} + +#endif + std::string GetOperandSummary(const V1_0::Operand& operand) { return 
android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " + toString(operand.type); } +#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2 + +std::string GetOperandSummary(const V1_2::Operand& operand) +{ + return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " + + toString(operand.type); +} + +#endif + using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream);