From 42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3 Mon Sep 17 00:00:00 2001 From: Kevin May Date: Thu, 26 Mar 2020 13:34:14 +0000 Subject: IVGCVSW-4447 Add Hal 1_3 Support * Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl * Add new .rc file for 1.3 service * Add ArmnnPreparedModel_1_3 and implement new functions * Update Android.mk with 1.3 driver and service * Refactor ifdef to include ARMNN_ANDROID_NN_V1_3 * Create Utils getMainModel for new 1.3 Model Main Subgraph * Use android Utils to convertToV1_X in ArmnnPreparedModel_1_3 * Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp * Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer Signed-off-by: Kevin May Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3 --- ArmnnPreparedModel_1_2.cpp | 128 ++------------------------------------------- 1 file changed, 4 insertions(+), 124 deletions(-) (limited to 'ArmnnPreparedModel_1_2.cpp') diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp index 5031c5ff..76ef4265 100644 --- a/ArmnnPreparedModel_1_2.cpp +++ b/ArmnnPreparedModel_1_2.cpp @@ -2,9 +2,6 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -// Note: the ArmnnBurstExecutorWithCache in this file is based on Android code
-// under the Apache 2.0 license. See comment below for details. 
-// #define LOG_TAG "ArmnnDriver" @@ -215,27 +212,6 @@ Return ArmnnPreparedModel_1_2::execute_1_2( return Execute(request, measureTiming, cb); } -OutputShape ComputeShape(const armnn::TensorInfo& info) -{ - OutputShape shape; - - hidl_vec dimensions; - - armnn::TensorShape tensorShape = info.GetShape(); - const unsigned int numDims = tensorShape.GetNumDimensions(); - dimensions.resize(numDims); - - for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx) - { - dimensions[outputIdx] = tensorShape[outputIdx]; - } - - shape.dimensions = dimensions; - shape.isSufficient = true; - - return shape; -} - template Return ArmnnPreparedModel_1_2::PrepareMemoryForInputs( armnn::InputTensors& inputs, @@ -348,27 +324,6 @@ Return ArmnnPreparedModel_1_2::PrepareMemoryForIO return V1_0::ErrorStatus::NONE; } -void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools) -{ - if (memPools.empty()) - { - return; - } - // Commit output buffers. - // Note that we update *all* pools, even if they aren't actually used as outputs - - // this is simpler and is what the CpuExecutor does. - for (auto& pool : memPools) - { - // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where - // update() has been removed and flush() added. -#if defined(ARMNN_ANDROID_R) // Use the new Android implementation. 
- pool.flush(); -#else - pool.update(); -#endif - } -} - template Return ArmnnPreparedModel_1_2::executeSynchronously(const V1_0::Request& request, MeasureTiming measureTiming, @@ -514,7 +469,7 @@ bool ArmnnPreparedModel_1_2::ExecuteWithDummyInputs() { std::vector> storage; armnn::InputTensors inputTensors; - for (unsigned int i = 0; i < m_Model.inputIndexes.size(); i++) + for (unsigned int i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++) { const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i); storage.emplace_back(inputTensorInfo.GetNumBytes()); @@ -524,7 +479,7 @@ bool ArmnnPreparedModel_1_2::ExecuteWithDummyInputs() } armnn::OutputTensors outputTensors; - for (unsigned int i = 0; i < m_Model.outputIndexes.size(); i++) + for (unsigned int i = 0; i < getMainModel(m_Model).outputIndexes.size(); i++) { const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i); storage.emplace_back(outputTensorInfo.GetNumBytes()); @@ -600,77 +555,6 @@ Return ArmnnPreparedModel_1_2::Execute(const V1_ return V1_0::ErrorStatus::NONE; } - -/// This class is strongly inspired by the default implementation in Android named DefaultBurstExecutorWithCache. 
-/// The original code is licensed under Apache-2.0 and can be found at the following link: -/// https://android.googlesource.com/platform/frameworks/ -/// ml/+/refs/tags/android-10.0.0_r20/nn/common/ExecutionBurstServer.cpp -class ArmnnBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache { -public: - ArmnnBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel) - : m_PreparedModel(preparedModel) - {} - - bool isCacheEntryPresent(int32_t slot) const override - { - const auto it = m_MemoryCache.find(slot); - return (it != m_MemoryCache.end()) && it->second.valid(); - } - - void addCacheEntry(const hidl_memory& memory, int32_t slot) override - { - m_MemoryCache[slot] = memory; - } - - void removeCacheEntry(int32_t slot) override - { - m_MemoryCache.erase(slot); - } - - std::tuple, Timing> execute( - const V1_0::Request& request, const std::vector& slots, - MeasureTiming measure) override - { - ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache::execute"); - hidl_vec pools(slots.size()); - - std::transform(slots.begin(), slots.end(), pools.begin(), [this](int32_t slot) - { - return m_MemoryCache[slot]; - }); - - V1_0::Request fullRequest = request; - fullRequest.pools = std::move(pools); - - // Setup Callback - V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE; - hidl_vec returnedOutputShapes; - Timing returnedTiming; - auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](V1_0::ErrorStatus status, - const hidl_vec& outputShapes, - const Timing& timing) - { - returnedStatus = status; - returnedOutputShapes = outputShapes; - returnedTiming = timing; - }; - - // Execute - ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache executing"); - const Return ret = m_PreparedModel->executeSynchronously(fullRequest, measure, cb); - - if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE) - { - ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::error executing"); - } - return 
std::make_tuple(returnedStatus, std::move(returnedOutputShapes), returnedTiming); - } - -private: - V1_2::IPreparedModel* const m_PreparedModel; - std::map m_MemoryCache; -}; - template Return ArmnnPreparedModel_1_2::configureExecutionBurst( const sp& callback, @@ -679,12 +563,10 @@ Return ArmnnPreparedModel_1_2::configureExecutionBurst( V1_2::IPreparedModel::configureExecutionBurst_cb cb) { ALOGV("ArmnnPreparedModel_1_2::configureExecutionBurst"); - const std::shared_ptr executorWithCache = - std::make_shared(this); const sp burst = ExecutionBurstServer::create(callback, requestChannel, resultChannel, - executorWithCache); + this); if (burst == nullptr) { @@ -697,9 +579,7 @@ Return ArmnnPreparedModel_1_2::configureExecutionBurst( return Void(); } - - -#ifdef ARMNN_ANDROID_NN_V1_2 +#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) template class ArmnnPreparedModel_1_2; template bool ArmnnPreparedModel_1_2::ExecuteGraph( std::shared_ptr>& pMemPools, -- cgit v1.2.1