diff options
Diffstat (limited to '1.3')
-rw-r--r-- | 1.3/ArmnnDriver.hpp     | 3 | +2 -1
-rw-r--r-- | 1.3/ArmnnDriverImpl.cpp | 6 | +4 -2
-rw-r--r-- | 1.3/ArmnnDriverImpl.hpp | 3 | +2 -1
3 files changed, 8 insertions, 4 deletions
diff --git a/1.3/ArmnnDriver.hpp b/1.3/ArmnnDriver.hpp
index 798c4381..b6b55fae 100644
--- a/1.3/ArmnnDriver.hpp
+++ b/1.3/ArmnnDriver.hpp
@@ -206,7 +206,8 @@ public:
                 model,
                 cb,
                 model.relaxComputationFloat32toFloat16
-                    && m_Options.GetFp16Enabled());
+                    && m_Options.GetFp16Enabled(),
+                priority);
     }

     Return<void> getSupportedExtensions(getSupportedExtensions_cb cb)
diff --git a/1.3/ArmnnDriverImpl.cpp b/1.3/ArmnnDriverImpl.cpp
index 4b2ff148..6168c9d0 100644
--- a/1.3/ArmnnDriverImpl.cpp
+++ b/1.3/ArmnnDriverImpl.cpp
@@ -101,7 +101,8 @@ Return<V1_3::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_3(
     const DriverOptions& options,
     const V1_3::Model& model,
     const sp<V1_3::IPreparedModelCallback>& cb,
-    bool float32ToFloat16)
+    bool float32ToFloat16,
+    V1_3::Priority priority)
 {
     ALOGV("ArmnnDriverImpl::prepareArmnnModel_1_3()");

@@ -204,7 +205,8 @@ Return<V1_3::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_3(
             runtime.get(),
             model,
             options.GetRequestInputsAndOutputsDumpDir(),
-            options.IsGpuProfilingEnabled()));
+            options.IsGpuProfilingEnabled(),
+            priority));

     // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
diff --git a/1.3/ArmnnDriverImpl.hpp b/1.3/ArmnnDriverImpl.hpp
index 8a665ea5..2b39d4e0 100644
--- a/1.3/ArmnnDriverImpl.hpp
+++ b/1.3/ArmnnDriverImpl.hpp
@@ -30,7 +30,8 @@ public:
         const DriverOptions& options,
         const V1_3::Model& model,
         const android::sp<V1_3::IPreparedModelCallback>& cb,
-        bool float32ToFloat16 = false);
+        bool float32ToFloat16 = false,
+        V1_3::Priority priority = V1_3::Priority::MEDIUM);

     static Return<void> getCapabilities_1_3(const armnn::IRuntimePtr& runtime,
                                             V1_3::IDevice::getCapabilities_1_3_cb cb);