author     James Conroy <james.conroy@arm.com>   2019-11-01 15:21:48 +0000
committer  James Conroy <james.conroy@arm.com>   2019-11-12 18:51:20 +0000
commit     663c1849b2c359e6d898a763fff2ef013b55a459 (patch)
tree       b6212f72e8ca4175d5f0a93210220807460cb727
parent     d8df0260ced49a2796ff70e96284cf00eb316bcc (diff)
download   armnn-663c1849b2c359e6d898a763fff2ef013b55a459.tar.gz
IVGCVSW-4051 Update ACL pin to 94e0cf960ea6116eb57fa88d9b951f859b52c602
* Add is_initialised() check to CLScheduler in ClContextControl.
* Now use CLDepthwiseConvolutionLayer instead of CLDepthwiseConvolutionLayer3x3.
* Now use NEDepthwiseConvolutionLayer instead of NEDepthwiseConvolutionLayerOptimized.

!android-nn-driver:2212

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I509af65315a4322dc820a5cc1bbd36ed6999b4a7
-rwxr-xr-x  scripts/get_compute_library.sh                                     2
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp      2
-rw-r--r--  src/backends/cl/ClContextControl.cpp                               2
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp       4
-rw-r--r--  src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp             10
-rw-r--r--  src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp              5
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp  20
7 files changed, 26 insertions, 19 deletions
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 0a8f86a794..ee45f6c48a 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_08" # Release 19.08
#
# For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="79f88e6d825402388bb79fc123ee2dfe01985bda" #COMPMID-2313: Implement CL INSTANCE_NORMALIZATION function
+DEFAULT_CLFRAMEWORKREVISION="94e0cf960ea6116eb57fa88d9b951f859b52c602" #COMPMID-2690 Extend Doxygen documents to include GEMM Tuner
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 82430b1520..12d7143122 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -203,6 +203,8 @@ struct DummyConvolutionLayer
DummyConvolutionLayer()
{
typename ConvolutionLayerType::DescriptorType desc;
+ desc.m_StrideX = 1;
+ desc.m_StrideY = 1;
m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
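A likely reason for the new stride defaults, offered as an assumption rather than anything stated in the commit message: armnn convolution descriptors default their strides to 0, and the generic ACL validate() paths divide by the stride when computing output dimensions, so the dummy layer built by the IsLayerSupported tests would no longer validate against the updated pin. A minimal sketch of the fix:

    // Hypothetical illustration, not part of the patch: descriptors such
    // as armnn::DepthwiseConvolution2dDescriptor default m_StrideX and
    // m_StrideY to 0, while ACL's shape calculators compute roughly
    //   outW = (inW + padL + padR - kernelW) / strideX + 1
    // so a zero stride cannot pass validation. The test fixture now sets
    // both strides to 1 before constructing the dummy layer.
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_StrideX = 1;
    desc.m_StrideY = 1;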
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index 2ffece4ac3..7013b8a4f8 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -104,7 +104,7 @@ void ClContextControl::DoLoadOpenClRuntime(bool useTunedParameters)
cl::Context context;
cl::CommandQueue commandQueue;
- if (arm_compute::CLScheduler::get().context()() != NULL)
+ if (arm_compute::CLScheduler::get().is_initialised() && arm_compute::CLScheduler::get().context()() != NULL)
{
// Wait for all queued CL requests to finish before reinitialising it.
arm_compute::CLScheduler::get().sync();
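The added guard reflects that, under the updated ACL pin, querying CLScheduler::get().context() on a scheduler that has never been initialised may raise an error, where previously the raw context comparison alone was safe. A minimal sketch of the pattern, assuming the ACL 19.11-era CLScheduler API (is_initialised(), context(), sync()); the helper name is hypothetical:

    #include <arm_compute/runtime/CL/CLScheduler.h>

    void SyncIfLive()
    {
        auto& scheduler = arm_compute::CLScheduler::get();
        // Check is_initialised() first: calling context() on a
        // never-initialised scheduler may error, so only compare the
        // raw cl_context once the scheduler is known to be live.
        if (scheduler.is_initialised() && scheduler.context()() != NULL)
        {
            scheduler.sync(); // drain queued CL work before reinitialising
        }
    }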
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 126950c348..eb837d74b9 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -118,8 +118,8 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
bool use3x3Optimisation = (weightInfo.GetShape()[2] == 3) && (weightInfo.GetShape()[3] == 3);
if (use3x3Optimisation)
{
- m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>();
- static_cast<arm_compute::CLDepthwiseConvolutionLayer3x3*>(m_DepthwiseConvolutionLayer.get())->configure(
+ m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
+ static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
&input,
m_KernelTensor.get(),
m_BiasTensor.get(),
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index e8d537f2ef..4b43052365 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -59,20 +59,24 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des
auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+ auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
+
if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
{
- m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
+ layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
}
else
{
- m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
+ layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
}
+
+ m_ArgMinMaxLayer.reset(layer.release());
}
void NeonArgMinMaxWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonArgMinMaxWorkload_Execute");
- m_ArgMinMaxLayer.run();
+ m_ArgMinMaxLayer->run();
}
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
index 6301b13718..6e1cc46c13 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
@@ -8,7 +8,8 @@
#include <backendsCommon/Workload.hpp>
#include <arm_compute/core/Error.h>
-#include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
+#include <arm_compute/runtime/IFunction.h>
+
namespace armnn
{
@@ -23,7 +24,7 @@ public:
virtual void Execute() const override;
private:
- mutable arm_compute::NEArgMinMaxLayer m_ArgMinMaxLayer;
+ std::unique_ptr<arm_compute::IFunction> m_ArgMinMaxLayer;
};
} //namespace armnn
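Taken together, the .cpp and .hpp changes move the workload from holding a mutable NEArgMinMaxLayer by value to owning it behind arm_compute::IFunction, so the header no longer needs the concrete layer's definition and Execute() no longer needs a mutable member. A condensed sketch of the configure-then-type-erase pattern (class and method names hypothetical; assumes only that arm_compute::IFunction exposes a virtual run()):

    #include <arm_compute/runtime/IFunction.h>
    #include <memory>
    #include <utility>

    class ExampleWorkload
    {
    public:
        template <typename AclFunction, typename... Args>
        void Configure(Args&&... args)
        {
            // Configure the concrete ACL function, then store it behind
            // the runtime-polymorphic IFunction interface.
            auto layer = std::make_unique<AclFunction>();
            layer->configure(std::forward<Args>(args)...);
            m_Layer = std::move(layer);
        }

        // A const member function may still call the non-const run()
        // through the pointer, which is why the old 'mutable' qualifier
        // on the by-value member is no longer required.
        void Execute() const { m_Layer->run(); }

    private:
        std::unique_ptr<arm_compute::IFunction> m_Layer;
    };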
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 18085edab5..2093613513 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -120,19 +120,19 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
// Check for optimisation opportunities
arm_compute::Status optimizationStatus =
- arm_compute::NEDepthwiseConvolutionLayerOptimized::validate(inputInfo,
- kernelInfo,
- biasInfo,
- outputInfo,
- padStrideInfo,
- depthMultiplier,
- arm_compute::ActivationLayerInfo(),
- aclDilationInfo);
+ arm_compute::NEDepthwiseConvolutionLayer::validate(inputInfo,
+ kernelInfo,
+ biasInfo,
+ outputInfo,
+ padStrideInfo,
+ depthMultiplier,
+ arm_compute::ActivationLayerInfo(),
+ aclDilationInfo);
if (optimizationStatus.error_code() == arm_compute::ErrorCode::OK)
{
- m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayerOptimized>();
- static_cast<arm_compute::NEDepthwiseConvolutionLayerOptimized*>(
+ m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
+ static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
m_pDepthwiseConvolutionLayer.get())->configure(&input,
m_KernelTensor.get(),
m_BiasTensor.get(),