aboutsummaryrefslogtreecommitdiff
path: root/Utils.cpp
diff options
context:
space:
mode:
authorKevin May <kevin.may@arm.com>2020-03-26 13:34:14 +0000
committerKevin May <kevin.may@arm.com>2020-03-26 17:39:25 +0000
commit42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3 (patch)
treee5260f4b9e5e36080269243c1f1cd74f5589b206 /Utils.cpp
parentcae7e927a5b5559f67bb87a1737f6606d5d6f328 (diff)
downloadandroid-nn-driver-42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3.tar.gz
IVGCVSW-4447 Add Hal 1_3 Support
* Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl
* Add new .rc file for 1.3 service
* Add ArmnnPreparedModel_1_3 and implement new functions
* Update Android.mk with 1.3 driver and service
* Refactor ifdef to include ARMNN_ANDROID_NN_V1_3
* Create Utils getMainModel for new 1.3 Model Main Subgraph
* Use android Utils to convertToV1_X in ArmnnPreparedModel_1_3
* Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp
* Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3
Diffstat (limited to 'Utils.cpp')
-rw-r--r--Utils.cpp96
1 file changed, 94 insertions(+), 2 deletions(-)
diff --git a/Utils.cpp b/Utils.cpp
index c548f849..8a17b532 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -103,7 +103,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
return ret;
}
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)// Using ::android::hardware::neuralnetworks::V1_2
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
{
@@ -164,13 +164,74 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+
+armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
+{
+ using namespace armnn;
+ bool perChannel = false;
+
+ DataType type;
+ switch (operand.type)
+ {
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ type = armnn::DataType::Float32;
+ break;
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ type = armnn::DataType::Float16;
+ break;
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ type = armnn::DataType::QAsymmU8;
+ break;
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ perChannel=true;
+ ARMNN_FALLTHROUGH;
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ type = armnn::DataType::QSymmS8;
+ break;
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ type = armnn::DataType::QSymmS16;
+ break;
+ case V1_3::OperandType::TENSOR_INT32:
+ type = armnn::DataType::Signed32;
+ break;
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ type = armnn::DataType::QAsymmS8;
+ break;
+ default:
+ throw UnsupportedOperand<V1_3::OperandType>(operand.type);
+ }
+
+ TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
+ if (perChannel)
+ {
+ // ExtraParams is expected to be of type channelQuant
+ BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
+ V1_3::Operand::ExtraParams::hidl_discriminator::channelQuant);
+
+ auto perAxisQuantParams = operand.extraParams.channelQuant();
+
+ ret.SetQuantizationScales(perAxisQuantParams.scales);
+ ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
+ }
+ else
+ {
+ ret.SetQuantizationScale(operand.scale);
+ ret.SetQuantizationOffset(operand.zeroPoint);
+ }
+
+ return ret;
+}
+
+#endif
+
/// Produces a short human-readable summary of a HAL 1.0 operand:
/// its dimensions followed by its operand type name.
std::string GetOperandSummary(const V1_0::Operand& operand)
{
    const std::string dimensions =
        android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size());
    return dimensions + " " + toString(operand.type);
}
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
std::string GetOperandSummary(const V1_2::Operand& operand)
{
@@ -180,6 +241,16 @@ std::string GetOperandSummary(const V1_2::Operand& operand)
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+
+std::string GetOperandSummary(const V1_3::Operand& operand)
+{
+ return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
+ toString(operand.type);
+}
+
+#endif
+
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
unsigned int elementIndex,
std::ofstream& fileStream);
@@ -449,6 +520,27 @@ void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir,
}
}
+void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
+{
+ if (memPools.empty())
+ {
+ return;
+ }
+ // Commit output buffers.
+ // Note that we update *all* pools, even if they aren't actually used as outputs -
+ // this is simpler and is what the CpuExecutor does.
+ for (auto& pool : memPools)
+ {
+ // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
+ // update() has been removed and flush() added.
+#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
+ pool.flush();
+#else
+ pool.update();
+#endif
+ }
+}
+
} // namespace armnn_driver