author     Mike Kelly <mike.kelly@arm.com>    2020-11-12 10:58:48 +0000
committer  Jim Flynn <jim.flynn@arm.com>      2020-11-13 14:25:30 +0000
commit     07810fc2fcdd34db74222d90cc73ef12a88e7b78 (patch)
tree       8becef8453674822d079815b06ae37310b97d2cf /src/backends/cl/workloads/ClMultiplicationWorkload.cpp
parent     8502adeafbbb1db0acefa62560d93453e38dcadb (diff)
download   armnn-07810fc2fcdd34db74222d90cc73ef12a88e7b78.tar.gz
IVGCVSW-5328-5329 Fuse Activation
* Added Fused Activation Optimization to both CL and Neon backends.
* Added Fused Activation support to all the CL and Neon workloads that support it.
* Changed ProfilingTest network to be a Convolution layer followed by an Abs layer rather than an Activation layer.
* Added IBackendInternal::OptimizeSubgraphView function that can accept a ModelOptions.
* Network will now call OptimizeSubgraphView passing in the ModelOptions.

Signed-off-by: Keith Davis <keith.davis@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib536ac3cbafc7d9b35c139ad9a65b7735262cd9d
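For context, the graph shape this optimisation targets is a layer with a directly attached activation, e.g. a Multiplication followed by a ReLU. The sketch below (not part of this patch) builds such a Mul -> ReLU pair with the public ArmNN network API; after optimisation for the CL backend this pair is a candidate for fusion into a single ClMultiplicationWorkload carrying the activation. Layer names, tensor shapes and the choice of ReLU are illustrative assumptions.

#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>

using namespace armnn;

// Build input0, input1 -> Multiplication -> ReLU -> output.
// Shapes and layer names here are purely illustrative.
INetworkPtr MakeMulReluNetwork()
{
    INetworkPtr net = INetwork::Create();

    IConnectableLayer* in0  = net->AddInputLayer(0, "input0");
    IConnectableLayer* in1  = net->AddInputLayer(1, "input1");
    IConnectableLayer* mul  = net->AddMultiplicationLayer("mul");

    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu = net->AddActivationLayer(reluDesc, "relu");

    IConnectableLayer* out  = net->AddOutputLayer(0, "output");

    const TensorInfo info({1, 2, 2, 3}, DataType::Float32);
    in0->GetOutputSlot(0).SetTensorInfo(info);
    in1->GetOutputSlot(0).SetTensorInfo(info);
    mul->GetOutputSlot(0).SetTensorInfo(info);
    relu->GetOutputSlot(0).SetTensorInfo(info);

    // Wire the layers: both inputs feed the multiplication, whose result
    // feeds the activation that the CL backend may fuse away.
    in0->GetOutputSlot(0).Connect(mul->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(mul->GetInputSlot(1));
    mul->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    relu->GetOutputSlot(0).Connect(out->GetInputSlot(0));

    return net;
}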
Diffstat (limited to 'src/backends/cl/workloads/ClMultiplicationWorkload.cpp')
-rw-r--r--  src/backends/cl/workloads/ClMultiplicationWorkload.cpp  20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index e9b75c3f10..46a1c4bc59 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -4,8 +4,12 @@
//
#include "ClMultiplicationWorkload.hpp"
-#include <cl/ClTensorHandle.hpp>
+
+#include <aclCommon/ArmComputeUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
#include "ClWorkloadUtils.hpp"
namespace armnn
@@ -13,7 +17,8 @@ namespace armnn
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
- const TensorInfo& output)
+ const TensorInfo& output,
+ const ActivationDescriptor* activationDescriptor)
{
const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
@@ -23,6 +28,9 @@ arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
arm_compute::ConvertPolicy::SATURATE :
arm_compute::ConvertPolicy::WRAP;
+ const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+ activationDescriptor);
+
// At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
// when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
// ignored for F32 tensors.
@@ -31,7 +39,8 @@ arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo& input0,
&aclOutput,
1.0f,
convertPolicy,
- arm_compute::RoundingPolicy::TO_ZERO);
+ arm_compute::RoundingPolicy::TO_ZERO,
+ activationInfo);
}
@@ -50,13 +59,16 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
arm_compute::ConvertPolicy::SATURATE :
arm_compute::ConvertPolicy::WRAP;
+ const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
// Construct
m_PixelWiseMultiplication.configure(&input0,
&input1,
&output,
1.0f,
convertPolicy,
- arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
+ arm_compute::RoundingPolicy::TO_NEAREST_EVEN,
+ activationInfo);
}
void ClMultiplicationWorkload::Execute() const
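For callers, the practical effect of this patch is the extra ActivationDescriptor parameter on the validate function. Below is a minimal sketch (not part of the patch) of exercising the new overload; the include paths, tensor shape and the bounded-ReLU parameters are illustrative assumptions.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <cl/workloads/ClMultiplicationWorkload.hpp> // illustrative include path

bool CanFuseBoundedReluIntoMul()
{
    using namespace armnn;

    const TensorInfo info({1, 2, 2, 3}, DataType::Float32);

    // The activation that a preceding optimisation step decided to fuse.
    ActivationDescriptor activation;
    activation.m_Function = ActivationFunction::BoundedReLu;
    activation.m_A = 6.0f; // upper bound of the bounded ReLU (illustrative)

    // Passing nullptr instead keeps the previous, unfused validation behaviour.
    const arm_compute::Status status =
        ClMultiplicationWorkloadValidate(info, info, info, &activation);

    return status.error_code() == arm_compute::ErrorCode::OK;
}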