author     Kevin May <kevin.may@arm.com>  2020-03-26 13:34:14 +0000
committer  Kevin May <kevin.may@arm.com>  2020-03-26 17:39:25 +0000
commit     42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3 (patch)
tree       e5260f4b9e5e36080269243c1f1cd74f5589b206 /Utils.hpp
parent     cae7e927a5b5559f67bb87a1737f6606d5d6f328 (diff)
download   android-nn-driver-42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3.tar.gz
IVGCVSW-4447 Add Hal 1_3 Support
* Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl
* Add new .rc file for 1.3 service
* Add ArmnnPreparedModel_1_3 and implement new functions
* Update Android.mk with 1.3 driver and service
* Refactor ifdef to include ARMNN_ANDROID_NN_V1_3
* Create Utils getMainModel for new 1.3 Model Main Subgraph
* Use android Utils to convertToV1_X in ArmnnPreparedModel_1_3
* Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp
* Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3
Diffstat (limited to 'Utils.hpp')
-rw-r--r--  Utils.hpp  74
1 file changed, 63 insertions, 11 deletions
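
The key change in Utils.hpp is the getMainModel() overload set: in HAL 1.3 the operands and operations move into the model's main Subgraph, so templated helpers such as GetModelSummary can no longer read model.operands directly. The following standalone sketch illustrates the pattern; DemoOperand, DemoModelV1_0, DemoSubgraph and DemoModelV1_3 are hypothetical stand-ins, not the real ::android::hardware::neuralnetworks HIDL types, which require the Android source tree.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct DemoOperand { uint32_t numDims; };

// Pre-1.3 layout: operands live directly on the model.
struct DemoModelV1_0
{
    std::vector<DemoOperand> operands;
};

// 1.3 layout: the "main" subgraph owns the operands.
struct DemoSubgraph
{
    std::vector<DemoOperand> operands;
};

struct DemoModelV1_3
{
    DemoSubgraph main;
};

// Overload set mirroring the getMainModel() helpers added in this patch.
inline const DemoModelV1_0& getMainModel(const DemoModelV1_0& model) { return model; }
inline const DemoSubgraph&  getMainModel(const DemoModelV1_3& model) { return model.main; }

// A templated helper, in the spirit of GetModelSummary, now compiles for both layouts.
template <typename Model>
std::size_t CountOperands(const Model& model)
{
    return getMainModel(model).operands.size();
}

int main()
{
    DemoModelV1_0 oldModel{ { DemoOperand{4}, DemoOperand{2} } };
    DemoModelV1_3 newModel{ DemoSubgraph{ { DemoOperand{1} } } };

    std::cout << CountOperands(oldModel) << " operand(s) in the 1.0-style model\n";
    std::cout << CountOperands(newModel) << " operand(s) in the 1.3 main subgraph\n";
    return 0;
}

In the actual diff below the same trick lets one GetModelSummary template serve V1_0 through V1_3 models.
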
diff --git a/Utils.hpp b/Utils.hpp
index 6256655f..b61ddb21 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -19,11 +19,16 @@
#include <iomanip>
namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
+#endif
+
namespace armnn_driver
{
@@ -31,6 +36,17 @@ namespace armnn_driver
using DataLocation = ::android::nn::hal::DataLocation;
#endif
+inline const V1_0::Model& getMainModel(const V1_0::Model& model) { return model; }
+inline const V1_1::Model& getMainModel(const V1_1::Model& model) { return model; }
+
+#if defined (ARMNN_ANDROID_NN_V1_2) || defined (ARMNN_ANDROID_NN_V1_3)
+inline const V1_2::Model& getMainModel(const V1_2::Model& model) { return model; }
+#endif
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+inline const V1_3::Subgraph& getMainModel(const V1_3::Model& model) { return model.main; }
+#endif
+
extern const armnn::PermutationVector g_DontPermute;
template <typename OperandType>
@@ -56,42 +72,53 @@ void* GetMemoryFromPool(DataLocation location,
/// Can throw UnsupportedOperand
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand);
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);
+#endif
+
std::string GetOperandSummary(const V1_0::Operand& operand);
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
std::string GetOperandSummary(const V1_2::Operand& operand);
#endif
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+std::string GetOperandSummary(const V1_3::Operand& operand);
+#endif
+
template <typename HalModel>
std::string GetModelSummary(const HalModel& model)
{
std::stringstream result;
- result << model.inputIndexes.size() << " input(s), " << model.operations.size() << " operation(s), " <<
- model.outputIndexes.size() << " output(s), " << model.operands.size() << " operand(s)" << std::endl;
+ result << getMainModel(model).inputIndexes.size() << " input(s), "
+ << getMainModel(model).operations.size() << " operation(s), "
+ << getMainModel(model).outputIndexes.size() << " output(s), "
+ << getMainModel(model).operands.size() << " operand(s) "
+ << std::endl;
result << "Inputs: ";
- for (uint32_t i = 0; i < model.inputIndexes.size(); i++)
+ for (uint32_t i = 0; i < getMainModel(model).inputIndexes.size(); i++)
{
- result << GetOperandSummary(model.operands[model.inputIndexes[i]]) << ", ";
+ result << GetOperandSummary(getMainModel(model).operands[getMainModel(model).inputIndexes[i]]) << ", ";
}
result << std::endl;
result << "Operations: ";
- for (uint32_t i = 0; i < model.operations.size(); i++)
+ for (uint32_t i = 0; i < getMainModel(model).operations.size(); i++)
{
- result << toString(model.operations[i].type).c_str() << ", ";
+ result << toString(getMainModel(model).operations[i].type).c_str() << ", ";
}
result << std::endl;
result << "Outputs: ";
- for (uint32_t i = 0; i < model.outputIndexes.size(); i++)
+ for (uint32_t i = 0; i < getMainModel(model).outputIndexes.size(); i++)
{
- result << GetOperandSummary(model.operands[model.outputIndexes[i]]) << ", ";
+ result << GetOperandSummary(getMainModel(model).operands[getMainModel(model).outputIndexes[i]]) << ", ";
}
result << std::endl;
@@ -118,4 +145,29 @@ bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);
std::string GetFileTimestamp();
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
+inline V1_2::OutputShape ComputeShape(const armnn::TensorInfo& info)
+{
+ V1_2::OutputShape shape;
+
+ android::hardware::hidl_vec<uint32_t> dimensions;
+
+ armnn::TensorShape tensorShape = info.GetShape();
+ const unsigned int numDims = tensorShape.GetNumDimensions();
+ dimensions.resize(numDims);
+
+ for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
+ {
+ dimensions[outputIdx] = tensorShape[outputIdx];
+ }
+
+ shape.dimensions = dimensions;
+ shape.isSufficient = true;
+
+ return shape;
+}
+#endif
+
+void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);
+
} // namespace armnn_driver
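
As a usage note, ComputeShape() above packages an armnn::TensorInfo into the OutputShape record that HAL 1.2/1.3 executions hand back to the runtime, one entry per network output. A minimal standalone sketch of that flow follows; DemoTensorShape and DemoOutputShape are hypothetical stand-ins for armnn::TensorShape and V1_2::OutputShape, which need the ArmNN and Android headers.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for armnn::TensorShape.
struct DemoTensorShape
{
    std::vector<uint32_t> dims;
    unsigned int GetNumDimensions() const { return static_cast<unsigned int>(dims.size()); }
    uint32_t operator[](unsigned int i) const { return dims[i]; }
};

// Hypothetical stand-in for V1_2::OutputShape.
struct DemoOutputShape
{
    std::vector<uint32_t> dimensions;
    bool isSufficient = false;
};

// Same dimension-copy pattern as ComputeShape() in the diff above.
DemoOutputShape ComputeDemoShape(const DemoTensorShape& tensorShape)
{
    DemoOutputShape shape;
    shape.dimensions.resize(tensorShape.GetNumDimensions());
    for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
    {
        shape.dimensions[i] = tensorShape[i];
    }
    shape.isSufficient = true;
    return shape;
}

int main()
{
    // One OutputShape per network output, as an execution callback would report.
    const std::vector<DemoTensorShape> outputTensorShapes = { DemoTensorShape{ {1, 1001} },
                                                              DemoTensorShape{ {1, 224, 224, 3} } };
    std::vector<DemoOutputShape> reported;
    for (const auto& tensorShape : outputTensorShapes)
    {
        reported.push_back(ComputeDemoShape(tensorShape));
    }

    for (const auto& out : reported)
    {
        for (uint32_t d : out.dimensions) { std::cout << d << ' '; }
        std::cout << (out.isSufficient ? "(sufficient)" : "(insufficient)") << '\n';
    }
    return 0;
}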