aboutsummaryrefslogtreecommitdiff
path: root/src/backends/neon/NeonLayerSupport.cpp
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2020-11-12 10:58:48 +0000
committerJim Flynn <jim.flynn@arm.com>2020-11-13 14:25:30 +0000
commit07810fc2fcdd34db74222d90cc73ef12a88e7b78 (patch)
tree8becef8453674822d079815b06ae37310b97d2cf /src/backends/neon/NeonLayerSupport.cpp
parent8502adeafbbb1db0acefa62560d93453e38dcadb (diff)
downloadarmnn-07810fc2fcdd34db74222d90cc73ef12a88e7b78.tar.gz
IVGCVSW-5328-5329 Fuse Activation
* Added Fused Activation Optimization to both CL and Neon backends.
* Added Fused Activation support to all the CL and Neon workloads that support it.
* Changed ProfilingTest network to be a Convolution layer followed by an Abs layer rather than an Activation layer.
* Added IBackendInternal::OptimizeSubgraphView function that can accept a ModelOptions.
* Network will now call OptimizeSubgraphView passing in the ModelOptions.

Signed-off-by: Keith Davis <keith.davis@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib536ac3cbafc7d9b35c139ad9a65b7735262cd9d
Diffstat (limited to 'src/backends/neon/NeonLayerSupport.cpp')
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0084dbd03f..f55d1c8df6 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -167,7 +167,8 @@ bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
reasonIfUnsupported,
input0,
input1,
- output);
+ output,
+ nullptr);
}
bool NeonLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
@@ -199,7 +200,8 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
var,
beta,
gamma,
- descriptor);
+ descriptor,
+ nullptr);
}
bool NeonLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
@@ -345,7 +347,8 @@ bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
descriptor,
weights,
biases,
- isFastMathEnabled);
+ isFastMathEnabled,
+ nullptr);
}
bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
@@ -373,7 +376,8 @@ bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
output,
descriptor,
weights,
- biases);
+ biases,
+ nullptr);
}
bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
@@ -399,7 +403,8 @@ bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo&
output,
descriptor,
weights,
- biases);
+ biases,
+ nullptr);
}
bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
@@ -474,7 +479,8 @@ bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
output,
weights,
biases,
- descriptor);
+ descriptor,
+ nullptr);
}
bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
@@ -611,7 +617,8 @@ bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
reasonIfUnsupported,
input0,
input1,
- output);
+ output,
+ nullptr);
}
bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
@@ -623,7 +630,8 @@ bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
reasonIfUnsupported,
input0,
input1,
- output);
+ output,
+ nullptr);
}
bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -911,7 +919,8 @@ bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
reasonIfUnsupported,
input0,
input1,
- output);
+ output,
+ nullptr);
}
bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,