author    Giorgio Arena <giorgio.arena@arm.com>    2017-11-23 11:45:24 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:41:58 +0000
commit    04a8f8c4994f1c32b3f16a832c0e6f2599364c02 (patch)
tree      bb96843720896c60f8876a753b0a61b1efcab73b
parent    58c5794b917dae10ff115dd85ec69e2ca41136c1 (diff)
download  ComputeLibrary-04a8f8c4994f1c32b3f16a832c0e6f2599364c02.tar.gz
COMPMID-692 Consistent names for the interfaces
Change-Id: I4b1f3f0da9ff5342c7de7083736fe91871d14e5b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110351
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
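For downstream users the rename is purely mechanical: each affected kernel/function class and its header gain a "Layer" suffix (e.g. CLL2Normalize becomes CLL2NormalizeLayer, NEDepthConvert becomes NEDepthConvertLayer), while the configure()/run() interfaces themselves are untouched by this patch. A minimal sketch of how calling code adapts, assuming an OpenCL-capable device and the library built as usual (the example is illustrative and not taken from the patch itself):

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
// Old include, removed by this patch:
//   #include "arm_compute/runtime/CL/functions/CLL2Normalize.h"
#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"

int main()
{
    arm_compute::CLScheduler::get().default_init(); // create the default OpenCL context and queue
    arm_compute::CLTensor           input{};
    arm_compute::CLTensor           output{};
    arm_compute::CLL2NormalizeLayer l2norm{};       // was: arm_compute::CLL2Normalize l2norm{};
    // configure()/run() usage is unchanged by this rename; only the class and header names differ.
    (void)input;
    (void)output;
    (void)l2norm;
    return 0;
}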
-rw-r--r--  arm_compute/core/CL/CLKernels.h  8
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h (renamed from arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h)  14
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h (renamed from arm_compute/core/CL/kernels/CLDepthConvertKernel.h)  2
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.h (renamed from arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h)  12
-rw-r--r--  arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h (renamed from arm_compute/core/CL/kernels/CLL2NormalizeKernel.h)  14
-rw-r--r--  arm_compute/core/GLES_COMPUTE/GCKernels.h  2
-rw-r--r--  arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h (renamed from arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.h)  14
-rw-r--r--  arm_compute/core/NEON/NEKernels.h  8
-rw-r--r--  arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h (renamed from arm_compute/core/NEON/kernels/NEDepthConcatenateKernel.h)  14
-rw-r--r--  arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h (renamed from arm_compute/core/NEON/kernels/NEDepthConvertKernel.h)  12
-rw-r--r--  arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h (renamed from arm_compute/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.h)  12
-rw-r--r--  arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h (renamed from arm_compute/core/NEON/kernels/NEL2NormalizeKernel.h)  14
-rw-r--r--  arm_compute/graph/nodes/L2NormalizeLayer.h  2
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h  8
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h (renamed from arm_compute/runtime/CL/functions/CLDepthConcatenate.h)  16
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthConvertLayer.h (renamed from arm_compute/runtime/CL/functions/CLDepthConvert.h)  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h (renamed from arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h)  16
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h  8
-rw-r--r--  arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h (renamed from arm_compute/runtime/CL/functions/CLL2Normalize.h)  14
-rw-r--r--  arm_compute/runtime/CL/functions/CLLaplacianPyramid.h  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h  6
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/GCFunctions.h  2
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h (renamed from arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.h)  14
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h  8
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h (renamed from arm_compute/runtime/NEON/functions/NEDepthConcatenate.h)  16
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h (renamed from arm_compute/runtime/NEON/functions/NEDepthConvert.h)  10
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h (renamed from arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h)  14
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h  8
-rw-r--r--  arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h (renamed from arm_compute/runtime/NEON/functions/NEL2Normalize.h)  16
-rw-r--r--  arm_compute/runtime/NEON/functions/NELaplacianPyramid.h  4
-rw-r--r--  arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h  6
-rw-r--r--  docs/00_introduction.dox  10
-rw-r--r--  src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp (renamed from src/core/CL/kernels/CLDepthConcatenateKernel.cpp)  10
-rw-r--r--  src/core/CL/kernels/CLDepthConvertLayerKernel.cpp (renamed from src/core/CL/kernels/CLDepthConvertKernel.cpp)  4
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp (renamed from src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp)  10
-rw-r--r--  src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp (renamed from src/core/CL/kernels/CLL2NormalizeKernel.cpp)  8
-rw-r--r--  src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp (renamed from src/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.cpp)  10
-rw-r--r--  src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp (renamed from src/core/NEON/kernels/NEDepthConcatenateKernel.cpp)  10
-rw-r--r--  src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp (renamed from src/core/NEON/kernels/NEDepthConvertKernel.cpp)  8
-rw-r--r--  src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp (renamed from src/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.cpp)  10
-rw-r--r--  src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp (renamed from src/core/NEON/kernels/NEL2NormalizeKernel.cpp)  8
-rw-r--r--  src/graph/operations/CLSimpleOperations.cpp  10
-rw-r--r--  src/graph/operations/NESimpleOperations.cpp  10
-rw-r--r--  src/runtime/CL/functions/CLDepthConcatenateLayer.cpp (renamed from src/runtime/CL/functions/CLDepthConcatenate.cpp)  10
-rw-r--r--  src/runtime/CL/functions/CLDepthConvertLayer.cpp (renamed from src/runtime/CL/functions/CLDepthConvert.cpp)  8
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp (renamed from src/runtime/CL/functions/CLDepthwiseConvolution.cpp)  14
-rw-r--r--  src/runtime/CL/functions/CLL2NormalizeLayer.cpp (renamed from src/runtime/CL/functions/CLL2Normalize.cpp)  10
-rw-r--r--  src/runtime/CL/functions/CLLaplacianPyramid.cpp  2
-rwxr-xr-x  src/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.cpp (renamed from src/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.cpp)  10
-rw-r--r--  src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp (renamed from src/runtime/NEON/functions/NEDepthConcatenate.cpp)  10
-rw-r--r--  src/runtime/NEON/functions/NEDepthConvertLayer.cpp (renamed from src/runtime/NEON/functions/NEDepthConvert.cpp)  8
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp (renamed from src/runtime/NEON/functions/NEDepthwiseConvolution.cpp)  14
-rw-r--r--  src/runtime/NEON/functions/NEL2NormalizeLayer.cpp (renamed from src/runtime/NEON/functions/NEL2Normalize.cpp)  8
-rw-r--r--  src/runtime/NEON/functions/NELaplacianPyramid.cpp  2
-rw-r--r--  tests/benchmark/CL/DepthwiseConvolutionLayer.cpp (renamed from tests/benchmark/CL/DepthwiseConvolution.cpp)  14
-rw-r--r--  tests/benchmark/CL/SYSTEM/MobileNet.cpp  4
-rw-r--r--  tests/benchmark/CL/SYSTEM/MobileNetV1.cpp  6
-rw-r--r--  tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h (renamed from tests/benchmark/fixtures/DepthwiseConvolutionFixture.h)  2
-rw-r--r--  tests/benchmark/fixtures/MobileNetFixture.h  4
-rw-r--r--  tests/datasets/DepthwiseConvolutionLayerDataset.h (renamed from tests/datasets/DepthwiseConvolutionDataset.h)  24
-rw-r--r--  tests/datasets/MobileNetDepthwiseConvolutionLayerDataset.h (renamed from tests/datasets/MobileNetDepthwiseConvolutionDataset.h)  6
-rw-r--r--  tests/datasets/ShapeDatasets.h  6
-rw-r--r--  tests/networks/MobileNetNetwork.h  8
-rw-r--r--  tests/validation/CL/DepthConcatenateLayer.cpp  10
-rw-r--r--  tests/validation/CL/DepthConvertLayer.cpp (renamed from tests/validation/CL/DepthConvert.cpp)  208
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayer.cpp (renamed from tests/validation/CL/DepthwiseConvolution.cpp)  35
-rw-r--r--  tests/validation/CL/L2NormalizeLayer.cpp (renamed from tests/validation/CL/L2Normalize.cpp)  12
-rw-r--r--  tests/validation/CPP/DepthConvertLayer.cpp (renamed from tests/validation/CPP/DepthConvert.cpp)  2
-rw-r--r--  tests/validation/CPP/DepthConvertLayer.h (renamed from tests/validation/CPP/DepthConvert.h)  0
-rw-r--r--  tests/validation/CPP/DepthwiseConvolutionLayer.cpp (renamed from tests/validation/CPP/DepthwiseConvolution.cpp)  2
-rw-r--r--  tests/validation/CPP/DepthwiseConvolutionLayer.h (renamed from tests/validation/CPP/DepthwiseConvolution.h)  0
-rw-r--r--  tests/validation/CPP/DepthwiseSeparableConvolutionLayer.cpp  2
-rw-r--r--  tests/validation/CPP/L2NormalizeLayer.cpp (renamed from tests/validation/CPP/L2Normalize.cpp)  2
-rw-r--r--  tests/validation/CPP/L2NormalizeLayer.h (renamed from tests/validation/CPP/L2Normalize.h)  0
-rw-r--r--  tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp  6
-rw-r--r--  tests/validation/NEON/DepthConcatenateLayer.cpp  12
-rw-r--r--  tests/validation/NEON/DepthConvertLayer.cpp (renamed from tests/validation/NEON/DepthConvert.cpp)  208
-rw-r--r--  tests/validation/NEON/DepthwiseConvolutionLayer.cpp (renamed from tests/validation/NEON/DepthwiseConvolution.cpp)  35
-rw-r--r--  tests/validation/NEON/L2NormalizeLayer.cpp (renamed from tests/validation/NEON/L2Normalize.cpp)  12
-rw-r--r--  tests/validation/fixtures/DepthConcatenateLayerFixture.h  2
-rw-r--r--  tests/validation/fixtures/DepthConvertLayerFixture.h (renamed from tests/validation/fixtures/DepthConvertFixture.h)  12
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h (renamed from tests/validation/fixtures/DepthwiseConvolutionFixture.h)  16
-rw-r--r--  tests/validation/fixtures/L2NormalizeLayerFixture.h (renamed from tests/validation/fixtures/L2NormalizeFixture.h)  4
83 files changed, 592 insertions, 568 deletions
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 1ffbad90cf..9da0e5ab3a 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -42,9 +42,9 @@
#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
#include "arm_compute/core/CL/kernels/CLColorConvertKernel.h"
#include "arm_compute/core/CL/kernels/CLConvolutionKernel.h"
-#include "arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h"
-#include "arm_compute/core/CL/kernels/CLDepthConvertKernel.h"
-#include "arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h"
@@ -76,7 +76,7 @@
#include "arm_compute/core/CL/kernels/CLHistogramKernel.h"
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLIntegralImageKernel.h"
-#include "arm_compute/core/CL/kernels/CLL2NormalizeKernel.h"
+#include "arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLLKTrackerKernel.h"
#include "arm_compute/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLMagnitudePhaseKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
index 2833d8ec23..467bdfab3b 100644
--- a/arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
@@ -35,21 +35,21 @@ class ICLTensor;
/** Interface for the depth concatenate kernel.
* The input tensor will be concatenated into the output tensor.
*/
-class CLDepthConcatenateKernel : public ICLKernel
+class CLDepthConcatenateLayerKernel : public ICLKernel
{
public:
/** Default constructor */
- CLDepthConcatenateKernel();
+ CLDepthConcatenateLayerKernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthConcatenateKernel(const CLDepthConcatenateKernel &) = delete;
+ CLDepthConcatenateLayerKernel(const CLDepthConcatenateLayerKernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthConcatenateKernel &operator=(const CLDepthConcatenateKernel &) = delete;
+ CLDepthConcatenateLayerKernel &operator=(const CLDepthConcatenateLayerKernel &) = delete;
/** Allow instances of this class to be moved */
- CLDepthConcatenateKernel(CLDepthConcatenateKernel &&) = default;
+ CLDepthConcatenateLayerKernel(CLDepthConcatenateLayerKernel &&) = default;
/** Allow instances of this class to be moved */
- CLDepthConcatenateKernel &operator=(CLDepthConcatenateKernel &&) = default;
+ CLDepthConcatenateLayerKernel &operator=(CLDepthConcatenateLayerKernel &&) = default;
/** Default destructor */
- ~CLDepthConcatenateKernel() = default;
+ ~CLDepthConcatenateLayerKernel() = default;
/** Initialise the kernel's inputs and output
*
* @param[in] input Input tensor. Data types supported: QS8/QS16/F16/F32.
diff --git a/arm_compute/core/CL/kernels/CLDepthConvertKernel.h b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
index da70bff0fd..3a6310d69e 100644
--- a/arm_compute/core/CL/kernels/CLDepthConvertKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
@@ -36,7 +36,7 @@ class ICLTensor;
/** Interface for the depth conversion kernel.
*
*/
-class CLDepthConvertKernel : public ICLSimple2DKernel
+class CLDepthConvertLayerKernel : public ICLSimple2DKernel
{
public:
/** Set the input and output of the kernel.
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.h
index f9689a4329..eb62465f84 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.h
@@ -32,19 +32,19 @@ class ICLTensor;
/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor.
*/
-class CLDepthwiseConvolution3x3Kernel : public ICLKernel
+class CLDepthwiseConvolutionLayer3x3Kernel : public ICLKernel
{
public:
/** Default constructor */
- CLDepthwiseConvolution3x3Kernel();
+ CLDepthwiseConvolutionLayer3x3Kernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolution3x3Kernel(const CLDepthwiseConvolution3x3Kernel &) = delete;
+ CLDepthwiseConvolutionLayer3x3Kernel(const CLDepthwiseConvolutionLayer3x3Kernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolution3x3Kernel &operator=(const CLDepthwiseConvolution3x3Kernel &) = delete;
+ CLDepthwiseConvolutionLayer3x3Kernel &operator=(const CLDepthwiseConvolutionLayer3x3Kernel &) = delete;
/** Default Move Constructor. */
- CLDepthwiseConvolution3x3Kernel(CLDepthwiseConvolution3x3Kernel &&) = default;
+ CLDepthwiseConvolutionLayer3x3Kernel(CLDepthwiseConvolutionLayer3x3Kernel &&) = default;
/** Default move assignment operator. */
- CLDepthwiseConvolution3x3Kernel &operator=(CLDepthwiseConvolution3x3Kernel &&) = default;
+ CLDepthwiseConvolutionLayer3x3Kernel &operator=(CLDepthwiseConvolutionLayer3x3Kernel &&) = default;
/** Initialize the function's source, destination, conv and border_size.
*
* @param[in] input Source tensor. DataType supported: QASYMM8/F32.
diff --git a/arm_compute/core/CL/kernels/CLL2NormalizeKernel.h b/arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h
index 2056b4e615..f7d717119b 100644
--- a/arm_compute/core/CL/kernels/CLL2NormalizeKernel.h
+++ b/arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h
@@ -32,21 +32,21 @@ namespace arm_compute
class ICLTensor;
/** Interface for the reduction operation kernel */
-class CLL2NormalizeKernel : public ICLKernel
+class CLL2NormalizeLayerKernel : public ICLKernel
{
public:
/** Default constructor */
- CLL2NormalizeKernel();
+ CLL2NormalizeLayerKernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLL2NormalizeKernel(const CLL2NormalizeKernel &) = delete;
+ CLL2NormalizeLayerKernel(const CLL2NormalizeLayerKernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLL2NormalizeKernel &operator=(const CLL2NormalizeKernel &) = delete;
+ CLL2NormalizeLayerKernel &operator=(const CLL2NormalizeLayerKernel &) = delete;
/** Allow instances of this class to be moved */
- CLL2NormalizeKernel(CLL2NormalizeKernel &&) = default;
+ CLL2NormalizeLayerKernel(CLL2NormalizeLayerKernel &&) = default;
/** Allow instances of this class to be moved */
- CLL2NormalizeKernel &operator=(CLL2NormalizeKernel &&) = default;
+ CLL2NormalizeLayerKernel &operator=(CLL2NormalizeLayerKernel &&) = default;
/** Default destructor */
- ~CLL2NormalizeKernel() = default;
+ ~CLL2NormalizeLayerKernel() = default;
/** Set the input and output tensors.
*
diff --git a/arm_compute/core/GLES_COMPUTE/GCKernels.h b/arm_compute/core/GLES_COMPUTE/GCKernels.h
index 57d11d5f18..9831e25299 100644
--- a/arm_compute/core/GLES_COMPUTE/GCKernels.h
+++ b/arm_compute/core/GLES_COMPUTE/GCKernels.h
@@ -29,7 +29,7 @@
#include "arm_compute/core/GLES_COMPUTE/kernels/GCActivationLayerKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCCol2ImKernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.h"
+#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCDropoutKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
diff --git a/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.h b/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h
index 9a34a9a9c5..ce220cc564 100644
--- a/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.h
+++ b/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h
@@ -35,21 +35,21 @@ class IGCTensor;
/** Interface for the depth concatenate kernel.
* The input tensor will be concatenated into the output tensor.
*/
-class GCDepthConcatenateKernel : public IGCKernel
+class GCDepthConcatenateLayerKernel : public IGCKernel
{
public:
/** Default constructor */
- GCDepthConcatenateKernel();
+ GCDepthConcatenateLayerKernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- GCDepthConcatenateKernel(const GCDepthConcatenateKernel &) = delete;
+ GCDepthConcatenateLayerKernel(const GCDepthConcatenateLayerKernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- GCDepthConcatenateKernel &operator=(const GCDepthConcatenateKernel &) = delete;
+ GCDepthConcatenateLayerKernel &operator=(const GCDepthConcatenateLayerKernel &) = delete;
/** Allow instances of this class to be moved */
- GCDepthConcatenateKernel(GCDepthConcatenateKernel &&) = default;
+ GCDepthConcatenateLayerKernel(GCDepthConcatenateLayerKernel &&) = default;
/** Allow instances of this class to be moved */
- GCDepthConcatenateKernel &operator=(GCDepthConcatenateKernel &&) = default;
+ GCDepthConcatenateLayerKernel &operator=(GCDepthConcatenateLayerKernel &&) = default;
/** Default destructor */
- ~GCDepthConcatenateKernel() = default;
+ ~GCDepthConcatenateLayerKernel() = default;
/** Initialise the kernel's inputs and output
*
* @param[in] input Input tensor. Data types supported: F16/F32.
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index b23e2ac5a3..6c31fa4fb1 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -44,9 +44,9 @@
#include "arm_compute/core/NEON/kernels/NEConvolutionKernel.h"
#include "arm_compute/core/NEON/kernels/NECumulativeDistributionKernel.h"
#include "arm_compute/core/NEON/kernels/NEDeconvolutionLayerUpsampleKernel.h"
-#include "arm_compute/core/NEON/kernels/NEDepthConcatenateKernel.h"
-#include "arm_compute/core/NEON/kernels/NEDepthConvertKernel.h"
-#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseVectorToTensorKernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h"
@@ -83,7 +83,7 @@
#include "arm_compute/core/NEON/kernels/NEHistogramKernel.h"
#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h"
#include "arm_compute/core/NEON/kernels/NEIntegralImageKernel.h"
-#include "arm_compute/core/NEON/kernels/NEL2NormalizeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NELKTrackerKernel.h"
#include "arm_compute/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEDepthConcatenateKernel.h b/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h
index 784dfc3f5c..6029873f22 100644
--- a/arm_compute/core/NEON/kernels/NEDepthConcatenateKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h
@@ -34,21 +34,21 @@ class ITensor;
/** Interface for the depth concatenate kernel.
* The input tensor will be concatenated into the output tensor.
*/
-class NEDepthConcatenateKernel : public INEKernel
+class NEDepthConcatenateLayerKernel : public INEKernel
{
public:
/** Default constructor */
- NEDepthConcatenateKernel();
+ NEDepthConcatenateLayerKernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthConcatenateKernel(const NEDepthConcatenateKernel &) = delete;
+ NEDepthConcatenateLayerKernel(const NEDepthConcatenateLayerKernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthConcatenateKernel &operator=(const NEDepthConcatenateKernel &) = delete;
+ NEDepthConcatenateLayerKernel &operator=(const NEDepthConcatenateLayerKernel &) = delete;
/** Allow instances of this class to be moved */
- NEDepthConcatenateKernel(NEDepthConcatenateKernel &&) = default;
+ NEDepthConcatenateLayerKernel(NEDepthConcatenateLayerKernel &&) = default;
/** Allow instances of this class to be moved */
- NEDepthConcatenateKernel &operator=(NEDepthConcatenateKernel &&) = default;
+ NEDepthConcatenateLayerKernel &operator=(NEDepthConcatenateLayerKernel &&) = default;
/** Default destructor */
- ~NEDepthConcatenateKernel() = default;
+ ~NEDepthConcatenateLayerKernel() = default;
/** Initialise the kernel's inputs and output
*
* @param[in] input Input tensor. Data types supported: QS8/QS16/F16/F32.
diff --git a/arm_compute/core/NEON/kernels/NEDepthConvertKernel.h b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
index 332406f239..af51ded87a 100644
--- a/arm_compute/core/NEON/kernels/NEDepthConvertKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
@@ -34,19 +34,19 @@ namespace arm_compute
class ITensor;
/** Depth conversion kernel */
-class NEDepthConvertKernel : public INEKernel
+class NEDepthConvertLayerKernel : public INEKernel
{
public:
/** Default constructor*/
- NEDepthConvertKernel();
+ NEDepthConvertLayerKernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthConvertKernel(const NEDepthConvertKernel &) = delete;
+ NEDepthConvertLayerKernel(const NEDepthConvertLayerKernel &) = delete;
/** Default move constructor */
- NEDepthConvertKernel(NEDepthConvertKernel &&) = default;
+ NEDepthConvertLayerKernel(NEDepthConvertLayerKernel &&) = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthConvertKernel &operator=(const NEDepthConvertKernel &) = delete;
+ NEDepthConvertLayerKernel &operator=(const NEDepthConvertLayerKernel &) = delete;
/** Default move assignment operator */
- NEDepthConvertKernel &operator=(NEDepthConvertKernel &&) = default;
+ NEDepthConvertLayerKernel &operator=(NEDepthConvertLayerKernel &&) = default;
/** Set the input and output of the kernel
*
* Valid conversions Input -> Output :
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
index a32a06b61d..b8f01cb635 100644
--- a/arm_compute/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
@@ -32,19 +32,19 @@ class ITensor;
/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor.
*/
-class NEDepthwiseConvolution3x3Kernel : public INEKernel
+class NEDepthwiseConvolutionLayer3x3Kernel : public INEKernel
{
public:
/** Default constructor */
- NEDepthwiseConvolution3x3Kernel();
+ NEDepthwiseConvolutionLayer3x3Kernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthwiseConvolution3x3Kernel(const NEDepthwiseConvolution3x3Kernel &) = delete;
+ NEDepthwiseConvolutionLayer3x3Kernel(const NEDepthwiseConvolutionLayer3x3Kernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthwiseConvolution3x3Kernel &operator=(const NEDepthwiseConvolution3x3Kernel &) = delete;
+ NEDepthwiseConvolutionLayer3x3Kernel &operator=(const NEDepthwiseConvolutionLayer3x3Kernel &) = delete;
/** Default Move Constructor. */
- NEDepthwiseConvolution3x3Kernel(NEDepthwiseConvolution3x3Kernel &&) = default;
+ NEDepthwiseConvolutionLayer3x3Kernel(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
/** Default move assignment operator. */
- NEDepthwiseConvolution3x3Kernel &operator=(NEDepthwiseConvolution3x3Kernel &&) = default;
+ NEDepthwiseConvolutionLayer3x3Kernel &operator=(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
/** Initialize the function's source, destination, conv and border_size.
*
* @param[in] input Source tensor. DataType supported: F32.
diff --git a/arm_compute/core/NEON/kernels/NEL2NormalizeKernel.h b/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
index fbbe4bee99..7aa5116b68 100644
--- a/arm_compute/core/NEON/kernels/NEL2NormalizeKernel.h
+++ b/arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h
@@ -31,21 +31,21 @@ namespace arm_compute
class ITensor;
/** Interface for performing a L2 normalize on a given axis given the square sum of it in this axis */
-class NEL2NormalizeKernel : public INEKernel
+class NEL2NormalizeLayerKernel : public INEKernel
{
public:
/** Default constructor */
- NEL2NormalizeKernel();
+ NEL2NormalizeLayerKernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEL2NormalizeKernel(const NEL2NormalizeKernel &) = delete;
+ NEL2NormalizeLayerKernel(const NEL2NormalizeLayerKernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEL2NormalizeKernel &operator=(const NEL2NormalizeKernel &) = delete;
+ NEL2NormalizeLayerKernel &operator=(const NEL2NormalizeLayerKernel &) = delete;
/** Allow instances of this class to be moved */
- NEL2NormalizeKernel(NEL2NormalizeKernel &&) = default;
+ NEL2NormalizeLayerKernel(NEL2NormalizeLayerKernel &&) = default;
/** Allow instances of this class to be moved */
- NEL2NormalizeKernel &operator=(NEL2NormalizeKernel &&) = default;
+ NEL2NormalizeLayerKernel &operator=(NEL2NormalizeLayerKernel &&) = default;
/** Default destructor */
- ~NEL2NormalizeKernel() = default;
+ ~NEL2NormalizeLayerKernel() = default;
/** Set the input and output tensors.
*
* @param[in] input Source tensor. Data types supported: F32.
diff --git a/arm_compute/graph/nodes/L2NormalizeLayer.h b/arm_compute/graph/nodes/L2NormalizeLayer.h
index fc2bbc2d19..a423306bd2 100644
--- a/arm_compute/graph/nodes/L2NormalizeLayer.h
+++ b/arm_compute/graph/nodes/L2NormalizeLayer.h
@@ -33,7 +33,7 @@ namespace arm_compute
{
namespace graph
{
-/** L2Normalize layer node */
+/** L2NormalizeLayer layer node */
class L2NormalizeLayer final : public INode
{
public:
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 9a20769ca1..f6ecef7a51 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -42,9 +42,9 @@
#include "arm_compute/runtime/CL/functions/CLColorConvert.h"
#include "arm_compute/runtime/CL/functions/CLConvolution.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConcatenate.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConvert.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDequantizationLayer.h"
#include "arm_compute/runtime/CL/functions/CLDerivative.h"
@@ -72,7 +72,7 @@
#include "arm_compute/runtime/CL/functions/CLHarrisCorners.h"
#include "arm_compute/runtime/CL/functions/CLHistogram.h"
#include "arm_compute/runtime/CL/functions/CLIntegralImage.h"
-#include "arm_compute/runtime/CL/functions/CLL2Normalize.h"
+#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "arm_compute/runtime/CL/functions/CLLaplacianPyramid.h"
#include "arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h"
#include "arm_compute/runtime/CL/functions/CLLocallyConnectedLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLDepthConcatenate.h b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
index 77997f6bd1..00b3b66c97 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConcatenate.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
@@ -29,7 +29,7 @@
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include <memory>
@@ -42,14 +42,14 @@ class ICLTensor;
/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
*
* -# @ref CLFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref CLDepthConcatenateKernel
+ * -# @ref CLDepthConcatenateLayerKernel
*
*/
-class CLDepthConcatenate : public IFunction
+class CLDepthConcatenateLayer : public IFunction
{
public:
/** Default constructor */
- CLDepthConcatenate();
+ CLDepthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
* @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QS8/QS16/F16/F32.
@@ -61,10 +61,10 @@ public:
void run() override;
private:
- std::vector<ICLTensor *> _inputs_vector;
- std::unique_ptr<CLDepthConcatenateKernel[]> _concat_kernels_vector;
- std::unique_ptr<CLFillBorderKernel[]> _border_handlers_vector;
- unsigned int _num_inputs;
+ std::vector<ICLTensor *> _inputs_vector;
+ std::unique_ptr<CLDepthConcatenateLayerKernel[]> _concat_kernels_vector;
+ std::unique_ptr<CLFillBorderKernel[]> _border_handlers_vector;
+ unsigned int _num_inputs;
};
}
#endif /* __ARM_COMPUTE_CLDEPTHCONCATENATE_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLDepthConvert.h b/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
index 9a4c63dd6d..c84dc15508 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConvert.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
@@ -33,8 +33,8 @@ namespace arm_compute
{
class ICLTensor;
-/** Basic function to run @ref CLDepthConvertKernel. */
-class CLDepthConvert : public ICLSimpleFunction
+/** Basic function to run @ref CLDepthConvertLayerKernel. */
+class CLDepthConvertLayer : public ICLSimpleFunction
{
public:
/** Initialize the function's source, destination
diff --git a/arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
index 40eb8523fb..f7899415d2 100644
--- a/arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h
+++ b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
@@ -24,7 +24,7 @@
#ifndef __ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__
#define __ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__
-#include "arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h"
@@ -40,15 +40,15 @@ class ICLTensor;
/** Basic function to execute a depthwise convolution for kernel size 3x3xC. This function calls the following OpenCL kernels:
*
- * -# @ref CLDepthwiseConvolution3x3Kernel
+ * -# @ref CLDepthwiseConvolutionLayer3x3Kernel
* -# @ref CLFillBorderKernel (if pad_x or pad_y > 0)
*
*/
-class CLDepthwiseConvolution3x3 : public IFunction
+class CLDepthwiseConvolutionLayer3x3 : public IFunction
{
public:
/** Default constructor */
- CLDepthwiseConvolution3x3();
+ CLDepthwiseConvolutionLayer3x3();
/** Initialize the function's source, destination, conv and border_size.
*
* @param[in, out] input Source tensor. Data type supported: QASYMM8/F32. (Written to only for border filling).
@@ -64,8 +64,8 @@ public:
void run() override;
private:
- CLDepthwiseConvolution3x3Kernel _kernel;
- CLFillBorderKernel _border_handler;
+ CLDepthwiseConvolutionLayer3x3Kernel _kernel;
+ CLFillBorderKernel _border_handler;
};
/** Basic function to execute a generic depthwise convolution. This function calls the following OpenCL kernels:
@@ -76,11 +76,11 @@ private:
* -# @ref CLFillBorderKernel (if pad_x or pad_y > 0)
*
*/
-class CLDepthwiseConvolution : public IFunction
+class CLDepthwiseConvolutionLayer : public IFunction
{
public:
/** Default constructor */
- CLDepthwiseConvolution();
+ CLDepthwiseConvolutionLayer();
/** Initialize the function's source, destination, weights and convolution information.
*
* @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border filling).
diff --git a/arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h
index a38446293b..27cee5ed3b 100644
--- a/arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h
@@ -27,7 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/IFunction.h"
@@ -39,7 +39,7 @@ class ICLTensor;
/** Basic function to execute depthwise convolution. This function calls the following OpenCL kernels and function:
*
- * -# @ref CLDepthwiseConvolution
+ * -# @ref CLDepthwiseConvolutionLayer
* -# @ref CLDirectConvolutionLayer
*
*/
@@ -72,8 +72,8 @@ public:
void run() override;
private:
- CLDepthwiseConvolution _depthwise_conv;
- CLDirectConvolutionLayer _pointwise_conv;
+ CLDepthwiseConvolutionLayer _depthwise_conv;
+ CLDirectConvolutionLayer _pointwise_conv;
};
}
#endif /*__ARM_COMPUTE_CL_DEPTHWISE_SEPARABLE_CONVOLUTION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLL2Normalize.h b/arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h
index 20af54eda2..8aea7a641b 100644
--- a/arm_compute/runtime/CL/functions/CLL2Normalize.h
+++ b/arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h
@@ -24,7 +24,7 @@
#ifndef __ARM_COMPUTE_CLL2NORMALIZE_H__
#define __ARM_COMPUTE_CLL2NORMALIZE_H__
-#include "arm_compute/core/CL/kernels/CLL2NormalizeKernel.h"
+#include "arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
@@ -41,11 +41,11 @@ class ICLTensor;
/** Perform reduction operation.
*/
-class CLL2Normalize : public IFunction
+class CLL2NormalizeLayer : public IFunction
{
public:
/** Constructor */
- CLL2Normalize(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ CLL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
@@ -60,10 +60,10 @@ public:
void run() override;
private:
- CLMemoryGroup _memory_group;
- CLReductionOperation _reduce_func;
- CLL2NormalizeKernel _normalize_kernel;
- CLTensor _sumsq;
+ CLMemoryGroup _memory_group;
+ CLReductionOperation _reduce_func;
+ CLL2NormalizeLayerKernel _normalize_kernel;
+ CLTensor _sumsq;
};
}
#endif /*__ARM_COMPUTE_CLL2NORMALIZE_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h b/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h
index 0c6708aa73..585a013e31 100644
--- a/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h
+++ b/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h
@@ -27,7 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLPyramid.h"
#include "arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConvert.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
#include "arm_compute/runtime/CL/functions/CLGaussian5x5.h"
#include "arm_compute/runtime/CL/functions/CLGaussianPyramid.h"
#include "arm_compute/runtime/IFunction.h"
@@ -77,7 +77,7 @@ private:
CLGaussianPyramidHalf _gaussian_pyr_function;
std::unique_ptr<CLGaussian5x5[]> _convf;
std::unique_ptr<CLArithmeticSubtraction[]> _subf;
- CLDepthConvert _depth_function;
+ CLDepthConvertLayer _depth_function;
CLPyramid _gauss_pyr;
CLPyramid _conv_pyr;
};
diff --git a/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h b/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
index 4bc7eb65ce..4a676c85a0 100644
--- a/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
+++ b/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
@@ -27,7 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLPyramid.h"
#include "arm_compute/runtime/CL/functions/CLArithmeticAddition.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConvert.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
#include "arm_compute/runtime/CL/functions/CLScale.h"
#include "arm_compute/runtime/IFunction.h"
@@ -43,7 +43,7 @@ using ICLImage = ICLTensor;
*
* -# @ref CLArithmeticAddition
* -# @ref CLScale
- * -# @ref CLDepthConvert
+ * -# @ref CLDepthConvertLayer
*
* This function reconstructs the original image from a Laplacian Image Pyramid.
*
@@ -85,7 +85,7 @@ private:
CLPyramid _tmp_pyr;
std::unique_ptr<CLArithmeticAddition[]> _addf;
std::unique_ptr<CLScale[]> _scalef;
- CLDepthConvert _depthf;
+ CLDepthConvertLayer _depthf;
};
}
#endif /*__ARM_COMPUTE_CLLAPLACIANRECONSTRUCT_H__ */
diff --git a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h
index 8a345c5fab..e76d4efb27 100644
--- a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h
+++ b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h
@@ -28,7 +28,7 @@
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCAbsoluteDifference.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCActivationLayer.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCBatchNormalizationLayer.h"
-#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDropoutLayer.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCFillBorder.h"
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h
index 801dc0e111..1151399f92 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h
@@ -25,7 +25,7 @@
#define __ARM_COMPUTE_GCDEPTHCONCATENATE_H__
#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.h"
+#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
@@ -40,14 +40,14 @@ class IGCTensor;
/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
*
* -# @ref GCFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref GCDepthConcatenateKernel
+ * -# @ref GCDepthConcatenateLayerKernel
*
*/
-class GCDepthConcatenate : public IFunction
+class GCDepthConcatenateLayer : public IFunction
{
public:
/** Default constructor */
- GCDepthConcatenate();
+ GCDepthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
* @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: F16/F32.
@@ -59,9 +59,9 @@ public:
void run() override;
private:
- std::unique_ptr<GCDepthConcatenateKernel[]> _concat_kernels_vector;
- std::unique_ptr<GCFillBorderKernel[]> _border_handlers_vector;
- unsigned int _num_inputs;
+ std::unique_ptr<GCDepthConcatenateLayerKernel[]> _concat_kernels_vector;
+ std::unique_ptr<GCFillBorderKernel[]> _border_handlers_vector;
+ unsigned int _num_inputs;
};
}
#endif /* __ARM_COMPUTE_GCDEPTHCONCATENATE_H__ */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 2e8c084371..08852cf368 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -45,9 +45,9 @@
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayerUpsample.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenate.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConvert.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDerivative.h"
@@ -77,7 +77,7 @@
#include "arm_compute/runtime/NEON/functions/NEHistogram.h"
#include "arm_compute/runtime/NEON/functions/NEIm2Col.h"
#include "arm_compute/runtime/NEON/functions/NEIntegralImage.h"
-#include "arm_compute/runtime/NEON/functions/NEL2Normalize.h"
+#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/runtime/NEON/functions/NELaplacianPyramid.h"
#include "arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h"
#include "arm_compute/runtime/NEON/functions/NELocallyConnectedLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenate.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index cc65099575..5b63b70634 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenate.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -26,7 +26,7 @@
#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/core/NEON/kernels/NEDepthConcatenateKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
#include <memory>
@@ -39,14 +39,14 @@ class ITensor;
/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
*
* -# @ref NEFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref NEDepthConcatenateKernel
+ * -# @ref NEDepthConcatenateLayerKernel
*
*/
-class NEDepthConcatenate : public IFunction
+class NEDepthConcatenateLayer : public IFunction
{
public:
/** Default constructor */
- NEDepthConcatenate();
+ NEDepthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
* @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QS8/QS16/F16/F32.
@@ -58,10 +58,10 @@ public:
void run() override;
private:
- std::vector<ITensor *> _inputs_vector;
- std::unique_ptr<NEDepthConcatenateKernel[]> _concat_kernels_vector;
- std::unique_ptr<NEFillBorderKernel[]> _border_handlers_vector;
- unsigned int _num_inputs;
+ std::vector<ITensor *> _inputs_vector;
+ std::unique_ptr<NEDepthConcatenateLayerKernel[]> _concat_kernels_vector;
+ std::unique_ptr<NEFillBorderKernel[]> _border_handlers_vector;
+ unsigned int _num_inputs;
};
}
#endif /* __ARM_COMPUTE_NEDEPTHCONCATENATE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConvert.h b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
index 37f7293fb3..b235e87b4a 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConvert.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
@@ -33,16 +33,16 @@ namespace arm_compute
{
class ITensor;
-/**Basic function to run @ref NEDepthConvertKernel */
-class NEDepthConvert : public INESimpleFunction
+/**Basic function to run @ref NEDepthConvertLayerKernel */
+class NEDepthConvertLayer : public INESimpleFunction
{
public:
/* Contructor */
- NEDepthConvert() = default;
+ NEDepthConvertLayer() = default;
/** Prevent instances of this class from being copied (As this class contains pointers)*/
- NEDepthConvert(const NEDepthConvert &) = delete;
+ NEDepthConvertLayer(const NEDepthConvertLayer &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers)*/
- const NEDepthConvert &operator=(const NEDepthConvert &) = delete;
+ const NEDepthConvertLayer &operator=(const NEDepthConvertLayer &) = delete;
/** Initialize the function's source, destination
*
* Valid conversions Input -> Output :
diff --git a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
index f2c209cd80..0da16ab2a9 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
@@ -24,7 +24,7 @@
#ifndef __ARM_COMPUTE_NEDEPTHWISECONVOLUTION_H__
#define __ARM_COMPUTE_NEDEPTHWISECONVOLUTION_H__
-#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseVectorToTensorKernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseWeightsReshapeKernel.h"
@@ -43,15 +43,15 @@ class ITensor;
/** Basic function to execute a depthwise convolution for kernel size 3x3xC. This function calls the following NEON kernels:
*
- * -# @ref NEDepthwiseConvolution3x3
+ * -# @ref NEDepthwiseConvolutionLayer3x3
* -# @ref NEFillBorderKernel (if pad_x or pad_y > 0)
*
*/
-class NEDepthwiseConvolution3x3 : public IFunction
+class NEDepthwiseConvolutionLayer3x3 : public IFunction
{
public:
/** Default constructor */
- NEDepthwiseConvolution3x3();
+ NEDepthwiseConvolutionLayer3x3();
/** Initialize the function's source, destination, kernels and border_size.
*
* @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border filling).
@@ -67,7 +67,7 @@ public:
void run() override;
private:
- NEDepthwiseConvolution3x3Kernel _kernel;
+ NEDepthwiseConvolutionLayer3x3Kernel _kernel;
NEDirectConvolutionLayerBiasAccumulateKernel _bias_kernel;
NEFillBorderKernel _border_handler;
bool _has_bias;
@@ -81,11 +81,11 @@ private:
* -# @ref NEFillBorderKernel (if pad_x or pad_y > 0)
*
*/
-class NEDepthwiseConvolution : public IFunction
+class NEDepthwiseConvolutionLayer : public IFunction
{
public:
/** Default constructor */
- NEDepthwiseConvolution();
+ NEDepthwiseConvolutionLayer();
/** Initialize the function's source, destination, weights and convolution information.
*
* @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border filling).
diff --git a/arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h
index 3f4c1389f0..0562c07515 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h
@@ -27,7 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/INESimpleFunction.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
@@ -39,7 +39,7 @@ class ITensor;
/** Basic function to execute depthwise convolution. This function calls the following NEON kernels and function:
*
- * -# @ref NEDepthwiseConvolution
+ * -# @ref NEDepthwiseConvolutionLayer
* -# @ref NEDirectConvolutionLayer
*
*/
@@ -72,8 +72,8 @@ public:
void run() override;
private:
- NEDepthwiseConvolution _depthwise_conv;
- NEDirectConvolutionLayer _pointwise_conv;
+ NEDepthwiseConvolutionLayer _depthwise_conv;
+ NEDirectConvolutionLayer _pointwise_conv;
};
}
#endif /*__ARM_COMPUTE_NEON_DEPTHWISE_SEPARABLE_CONVOLUTION_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEL2Normalize.h b/arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h
index 95d5186c13..100e239406 100644
--- a/arm_compute/runtime/NEON/functions/NEL2Normalize.h
+++ b/arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h
@@ -24,7 +24,7 @@
#ifndef __ARM_COMPUTE_NEL2NORMALIZE_H__
#define __ARM_COMPUTE_NEL2NORMALIZE_H__
-#include "arm_compute/core/NEON/kernels/NEL2NormalizeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
@@ -41,13 +41,13 @@ class ITensor;
*
* This function runs the following kernels:
* -# @ref NEReductionOperation
- * -# @ref NEL2NormalizeKernel
+ * -# @ref NEL2NormalizeLayerKernel
*/
-class NEL2Normalize : public IFunction
+class NEL2NormalizeLayer : public IFunction
{
public:
/** Constructor */
- NEL2Normalize(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ NEL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
* @param[in, out] input Source tensor. Data types supported: F32. (Written to only for border_size != 0)
@@ -61,10 +61,10 @@ public:
void run() override;
private:
- MemoryGroup _memory_group;
- NEReductionOperation _reduce_func;
- NEL2NormalizeKernel _normalize_kernel;
- Tensor _sumsq;
+ MemoryGroup _memory_group;
+ NEReductionOperation _reduce_func;
+ NEL2NormalizeLayerKernel _normalize_kernel;
+ Tensor _sumsq;
};
}
#endif /* __ARM_COMPUTE_NEL2NORMALIZE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NELaplacianPyramid.h b/arm_compute/runtime/NEON/functions/NELaplacianPyramid.h
index 991ae7c293..baa4b7b1a5 100644
--- a/arm_compute/runtime/NEON/functions/NELaplacianPyramid.h
+++ b/arm_compute/runtime/NEON/functions/NELaplacianPyramid.h
@@ -27,7 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConvert.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGaussian5x5.h"
#include "arm_compute/runtime/NEON/functions/NEGaussianPyramid.h"
#include "arm_compute/runtime/Pyramid.h"
@@ -79,7 +79,7 @@ private:
std::unique_ptr<NEArithmeticSubtraction[]> _subf;
Pyramid _gauss_pyr;
Pyramid _conv_pyr;
- NEDepthConvert _depth_function;
+ NEDepthConvertLayer _depth_function;
};
}
#endif /*__ARM_COMPUTE_NELAPLACIANPYRAMID_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h b/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h
index 4139733499..3d423607a3 100644
--- a/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h
+++ b/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h
@@ -27,7 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConvert.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/NEON/functions/NEScale.h"
#include "arm_compute/runtime/Pyramid.h"
@@ -43,7 +43,7 @@ using IImage = ITensor;
*
* -# @ref NEArithmeticAddition
* -# @ref NEScale
- * -# @ref NEDepthConvert
+ * -# @ref NEDepthConvertLayer
*
* This function reconstructs the original image from a Laplacian Image Pyramid.
*
@@ -85,7 +85,7 @@ private:
Pyramid _tmp_pyr;
std::unique_ptr<NEArithmeticAddition[]> _addf;
std::unique_ptr<NEScale[]> _scalef;
- NEDepthConvert _depthf;
+ NEDepthConvertLayer _depthf;
};
}
#endif /*__ARM_COMPUTE_NELAPLACIANRECONSTRUCT_H__ */
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index cc12897278..9d478e051a 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -175,21 +175,21 @@ v17.09 Public major release
- @ref arm_compute::NEGEMMAssemblyBaseKernel @ref arm_compute::NEGEMMAArch64Kernel
- @ref arm_compute::NEDequantizationLayerKernel / @ref arm_compute::NEDequantizationLayer
- @ref arm_compute::NEFloorKernel / @ref arm_compute::NEFloor
- - @ref arm_compute::NEL2NormalizeKernel / @ref arm_compute::NEL2Normalize
+ - @ref arm_compute::NEL2NormalizeLayerKernel / @ref arm_compute::NEL2NormalizeLayer
- @ref arm_compute::NEQuantizationLayerKernel @ref arm_compute::NEMinMaxLayerKernel / @ref arm_compute::NEQuantizationLayer
- @ref arm_compute::NEROIPoolingLayerKernel / @ref arm_compute::NEROIPoolingLayer
- @ref arm_compute::NEReductionOperationKernel / @ref arm_compute::NEReductionOperation
- @ref arm_compute::NEReshapeLayerKernel / @ref arm_compute::NEReshapeLayer
- New OpenCL kernels / functions:
- - @ref arm_compute::CLDepthwiseConvolution3x3Kernel @ref arm_compute::CLDepthwiseIm2ColKernel @ref arm_compute::CLDepthwiseVectorToTensorKernel @ref arm_compute::CLDepthwiseWeightsReshapeKernel / @ref arm_compute::CLDepthwiseConvolution3x3 @ref arm_compute::CLDepthwiseConvolution @ref arm_compute::CLDepthwiseSeparableConvolutionLayer
+ - @ref arm_compute::CLDepthwiseConvolutionLayer3x3Kernel @ref arm_compute::CLDepthwiseIm2ColKernel @ref arm_compute::CLDepthwiseVectorToTensorKernel @ref arm_compute::CLDepthwiseWeightsReshapeKernel / @ref arm_compute::CLDepthwiseConvolutionLayer3x3 @ref arm_compute::CLDepthwiseConvolutionLayer @ref arm_compute::CLDepthwiseSeparableConvolutionLayer
- @ref arm_compute::CLDequantizationLayerKernel / @ref arm_compute::CLDequantizationLayer
- @ref arm_compute::CLDirectConvolutionLayerKernel / @ref arm_compute::CLDirectConvolutionLayer
- @ref arm_compute::CLFlattenLayer
- @ref arm_compute::CLFloorKernel / @ref arm_compute::CLFloor
- @ref arm_compute::CLGEMMTranspose1xW
- @ref arm_compute::CLGEMMMatrixVectorMultiplyKernel
- - @ref arm_compute::CLL2NormalizeKernel / @ref arm_compute::CLL2Normalize
+ - @ref arm_compute::CLL2NormalizeLayerKernel / @ref arm_compute::CLL2NormalizeLayer
- @ref arm_compute::CLQuantizationLayerKernel @ref arm_compute::CLMinMaxLayerKernel / @ref arm_compute::CLQuantizationLayer
- @ref arm_compute::CLROIPoolingLayerKernel / @ref arm_compute::CLROIPoolingLayer
- @ref arm_compute::CLReductionOperationKernel / @ref arm_compute::CLReductionOperation
@@ -206,7 +206,7 @@ v17.06 Public major release
- User can specify his own scheduler by implementing the @ref arm_compute::IScheduler interface.
- New OpenCL kernels / functions:
- @ref arm_compute::CLBatchNormalizationLayerKernel / @ref arm_compute::CLBatchNormalizationLayer
- - @ref arm_compute::CLDepthConcatenateKernel / @ref arm_compute::CLDepthConcatenate
+ - @ref arm_compute::CLDepthConcatenateLayerKernel / @ref arm_compute::CLDepthConcatenateLayer
- @ref arm_compute::CLHOGOrientationBinningKernel @ref arm_compute::CLHOGBlockNormalizationKernel, @ref arm_compute::CLHOGDetectorKernel / @ref arm_compute::CLHOGDescriptor @ref arm_compute::CLHOGDetector @ref arm_compute::CLHOGGradient @ref arm_compute::CLHOGMultiDetection
- @ref arm_compute::CLLocallyConnectedMatrixMultiplyKernel / @ref arm_compute::CLLocallyConnectedLayer
- @ref arm_compute::CLWeightsReshapeKernel / @ref arm_compute::CLConvolutionLayerReshapeWeights
@@ -214,7 +214,7 @@ v17.06 Public major release
- @ref arm_compute::CPPDetectionWindowNonMaximaSuppressionKernel
- New NEON kernels / functions:
- @ref arm_compute::NEBatchNormalizationLayerKernel / @ref arm_compute::NEBatchNormalizationLayer
- - @ref arm_compute::NEDepthConcatenateKernel / @ref arm_compute::NEDepthConcatenate
+ - @ref arm_compute::NEDepthConcatenateLayerKernel / @ref arm_compute::NEDepthConcatenateLayer
- @ref arm_compute::NEDirectConvolutionLayerKernel / @ref arm_compute::NEDirectConvolutionLayer
- @ref arm_compute::NELocallyConnectedMatrixMultiplyKernel / @ref arm_compute::NELocallyConnectedLayer
- @ref arm_compute::NEWeightsReshapeKernel / @ref arm_compute::NEConvolutionLayerReshapeWeights
diff --git a/src/core/CL/kernels/CLDepthConcatenateKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index edfbf829ed..0275d4fd83 100644
--- a/src/core/CL/kernels/CLDepthConcatenateKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -41,17 +41,17 @@
using namespace arm_compute;
-CLDepthConcatenateKernel::CLDepthConcatenateKernel()
+CLDepthConcatenateLayerKernel::CLDepthConcatenateLayerKernel()
: _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0)
{
}
-BorderSize CLDepthConcatenateKernel::border_size() const
+BorderSize CLDepthConcatenateLayerKernel::border_size() const
{
return BorderSize(_top_bottom, _left_right);
}
-void CLDepthConcatenateKernel::configure(const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
+void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
{
static std::map<int, std::pair<std::string, int>> configs_map =
{
@@ -108,7 +108,7 @@ void CLDepthConcatenateKernel::configure(const ICLTensor *input, unsigned int de
ICLKernel::configure(win);
}
-void CLDepthConcatenateKernel::run(const Window &window, cl::CommandQueue &queue)
+void CLDepthConcatenateLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
diff --git a/src/core/CL/kernels/CLDepthConvertKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
index b2132073d5..83908a1469 100644
--- a/src/core/CL/kernels/CLDepthConvertKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/kernels/CLDepthConvertKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -38,7 +38,7 @@
using namespace arm_compute;
-void CLDepthConvertKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
+void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::U8, DataType::S16, DataType::QS16,
DataType::U16, DataType::U32, DataType::S32, DataType::F32);
diff --git a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
index e86c55fbc0..003f1f8330 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
@@ -60,17 +60,17 @@ TensorShape get_output_shape(TensorShape input_shape, TensorShape weights_shape,
}
} // namespace
-CLDepthwiseConvolution3x3Kernel::CLDepthwiseConvolution3x3Kernel()
+CLDepthwiseConvolutionLayer3x3Kernel::CLDepthwiseConvolutionLayer3x3Kernel()
: _border_size(0), _input(), _output(), _weights(), _biases(), _conv_stride_x(0), _conv_stride_y(0), _conv_pad_left(0), _conv_pad_top(0)
{
}
-BorderSize CLDepthwiseConvolution3x3Kernel::border_size() const
+BorderSize CLDepthwiseConvolutionLayer3x3Kernel::border_size() const
{
return _border_size;
}
-void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+void CLDepthwiseConvolutionLayer3x3Kernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
@@ -179,7 +179,7 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
ICLKernel::configure(win);
}
-void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue &queue)
+void CLDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
diff --git a/src/core/CL/kernels/CLL2NormalizeKernel.cpp b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
index 3e0758c980..36e351e048 100644
--- a/src/core/CL/kernels/CLL2NormalizeKernel.cpp
+++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/kernels/CLL2NormalizeKernel.h"
+#include "arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -37,12 +37,12 @@
using namespace arm_compute;
-CLL2NormalizeKernel::CLL2NormalizeKernel()
+CLL2NormalizeLayerKernel::CLL2NormalizeLayerKernel()
: _input(nullptr), _sum(nullptr), _output(nullptr), _axis(0), _epsilon(1e-12)
{
}
-void CLL2NormalizeKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, unsigned int axis, float epsilon)
+void CLL2NormalizeLayerKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, unsigned int axis, float epsilon)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
@@ -87,7 +87,7 @@ void CLL2NormalizeKernel::configure(const ICLTensor *input, const ICLTensor *sum
ICLKernel::configure(win);
}
-void CLL2NormalizeKernel::run(const Window &window, cl::CommandQueue &queue)
+void CLL2NormalizeLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
diff --git a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
index b90a8e7b89..a6111782fd 100644
--- a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateKernel.h"
+#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/GLES_COMPUTE/GCHelpers.h"
@@ -37,17 +37,17 @@
using namespace arm_compute;
-GCDepthConcatenateKernel::GCDepthConcatenateKernel()
+GCDepthConcatenateLayerKernel::GCDepthConcatenateLayerKernel()
: _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0)
{
}
-BorderSize GCDepthConcatenateKernel::border_size() const
+BorderSize GCDepthConcatenateLayerKernel::border_size() const
{
return BorderSize(_top_bottom, _left_right);
}
-void GCDepthConcatenateKernel::configure(const IGCTensor *input, unsigned int depth_offset, IGCTensor *output)
+void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned int depth_offset, IGCTensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -111,7 +111,7 @@ void GCDepthConcatenateKernel::configure(const IGCTensor *input, unsigned int de
IGCKernel::configure(win);
}
-void GCDepthConcatenateKernel::run(const Window &window)
+void GCDepthConcatenateLayerKernel::run(const Window &window)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IGCKernel::window(), window);
diff --git a/src/core/NEON/kernels/NEDepthConcatenateKernel.cpp b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
index 7a62b0cb03..01b0f10f70 100644
--- a/src/core/NEON/kernels/NEDepthConcatenateKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEDepthConcatenateKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -95,17 +95,17 @@ void depth_concat(const ITensor *in, ITensor *out, std::pair<int, int> start_xy,
}
} // namespace
-NEDepthConcatenateKernel::NEDepthConcatenateKernel()
+NEDepthConcatenateLayerKernel::NEDepthConcatenateLayerKernel()
: _func(nullptr), _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0)
{
}
-BorderSize NEDepthConcatenateKernel::border_size() const
+BorderSize NEDepthConcatenateLayerKernel::border_size() const
{
return BorderSize(_top_bottom, _left_right);
}
-void NEDepthConcatenateKernel::configure(const ITensor *input, unsigned int depth_offset, ITensor *output)
+void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int depth_offset, ITensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -159,7 +159,7 @@ void NEDepthConcatenateKernel::configure(const ITensor *input, unsigned int dept
INEKernel::configure(win);
}
-void NEDepthConcatenateKernel::run(const Window &window, const ThreadInfo &info)
+void NEDepthConcatenateLayerKernel::run(const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/NEON/kernels/NEDepthConvertKernel.cpp b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
index d97a20be65..c29cb57513 100644
--- a/src/core/NEON/kernels/NEDepthConvertKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEDepthConvertKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -39,12 +39,12 @@ namespace arm_compute
class Coordinates;
} // namespace arm_compute
-NEDepthConvertKernel::NEDepthConvertKernel()
+NEDepthConvertLayerKernel::NEDepthConvertLayerKernel()
: _input(nullptr), _output(nullptr), _policy(), _shift(0), _fixed_point_position_input(0), _fixed_point_position_output(0)
{
}
-void NEDepthConvertKernel::configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
+void NEDepthConvertLayerKernel::configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::U16, DataType::QS16, DataType::F32);
@@ -120,7 +120,7 @@ void NEDepthConvertKernel::configure(ITensor *input, ITensor *output, ConvertPol
ICPPKernel::configure(win);
}
-void NEDepthConvertKernel::run(const Window &window, const ThreadInfo &info)
+void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
index 5c4bd34e05..02962e0492 100644
--- a/src/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/convolution/NEDirectConvolutionDetail.h"
#include "arm_compute/core/AccessWindowStatic.h"
@@ -40,17 +40,17 @@
using namespace arm_compute;
using namespace arm_compute::detail;
-NEDepthwiseConvolution3x3Kernel::NEDepthwiseConvolution3x3Kernel()
+NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
: _border_size(0), _input(), _output(), _weights(), _conv_info()
{
}
-BorderSize NEDepthwiseConvolution3x3Kernel::border_size() const
+BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
{
return _border_size;
}
-void NEDepthwiseConvolution3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
+void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
@@ -161,7 +161,7 @@ public:
}
};
-void NEDepthwiseConvolution3x3Kernel::run(const Window &window, const ThreadInfo &info)
+void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_UNUSED(info);
diff --git a/src/core/NEON/kernels/NEL2NormalizeKernel.cpp b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
index 12c532afd5..3bf1d9400e 100644
--- a/src/core/NEON/kernels/NEL2NormalizeKernel.cpp
+++ b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEL2NormalizeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEL2NormalizeLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -69,12 +69,12 @@ void l2_normalize_X(const ITensor *in, const ITensor *sum, ITensor *out, float e
}
} // namespace
-NEL2NormalizeKernel::NEL2NormalizeKernel()
+NEL2NormalizeLayerKernel::NEL2NormalizeLayerKernel()
: _input(nullptr), _sum(nullptr), _output(nullptr), _axis(0), _epsilon(1e-12)
{
}
-void NEL2NormalizeKernel::configure(const ITensor *input, const ITensor *sum, ITensor *output, unsigned int axis, float epsilon)
+void NEL2NormalizeLayerKernel::configure(const ITensor *input, const ITensor *sum, ITensor *output, unsigned int axis, float epsilon)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
ARM_COMPUTE_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Normalization axis greater than max number of dimensions");
@@ -109,7 +109,7 @@ void NEL2NormalizeKernel::configure(const ITensor *input, const ITensor *sum, IT
INEKernel::configure(win);
}
-void NEL2NormalizeKernel::run(const Window &window, const ThreadInfo &info)
+void NEL2NormalizeLayerKernel::run(const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
index 647f88f0e2..8f2bf23ce3 100644
--- a/src/graph/operations/CLSimpleOperations.cpp
+++ b/src/graph/operations/CLSimpleOperations.cpp
@@ -106,7 +106,7 @@ REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationT
return std::move(batch_norm);
}
-/* DepthConvert Layer */
+/* DepthConvertLayer Layer */
REGISTER_SIMPLE_OPERATION(CLDepthConvertLayerOperation, OPENCL, OperationType::DepthConvertLayer)
{
ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
@@ -121,7 +121,7 @@ REGISTER_SIMPLE_OPERATION(CLDepthConvertLayerOperation, OPENCL, OperationType::D
const auto shift = ctx.parameter<uint32_t>("shift");
// Create and configure function
- auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthConvert>();
+ auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthConvertLayer>();
depthconvert->configure(in, out, conv_policy, shift);
// Log info
@@ -156,13 +156,13 @@ REGISTER_SIMPLE_OPERATION(CLDepthwiseConvolutionOperation, OPENCL, OperationType
bool run_3x3_opt = opt3x3 && weights->info()->dimension(0) == 3;
if(run_3x3_opt)
{
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolution>();
+ auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
depwthwise_conv->configure(in, weights, biases, out, conv_info);
func = std::move(depwthwise_conv);
}
else
{
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolution3x3>();
+ auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>();
depwthwise_conv->configure(in, weights, biases, out, conv_info);
func = std::move(depwthwise_conv);
}
@@ -313,7 +313,7 @@ REGISTER_SIMPLE_OPERATION(CLL2NormalizeLayerOperation, OPENCL, OperationType::L2
const auto epsilon = ctx.parameter<float>("epsilon");
// Create and configure function
- auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2Normalize>();
+ auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2NormalizeLayer>();
l2_norm->configure(in, out, axis, epsilon);
// Log info
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
index f234341cec..bb99e8da4b 100644
--- a/src/graph/operations/NESimpleOperations.cpp
+++ b/src/graph/operations/NESimpleOperations.cpp
@@ -106,7 +106,7 @@ REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationTyp
return std::move(batch_norm);
}
-/* DepthConvert Layer */
+/* DepthConvertLayer Layer */
REGISTER_SIMPLE_OPERATION(NEDepthConvertLayerOperation, NEON, OperationType::DepthConvertLayer)
{
ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
@@ -121,7 +121,7 @@ REGISTER_SIMPLE_OPERATION(NEDepthConvertLayerOperation, NEON, OperationType::Dep
const auto shift = ctx.parameter<uint32_t>("shift");
// Create and configure function
- auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthConvert>();
+ auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthConvertLayer>();
depthconvert->configure(in, out, conv_policy, shift);
// Log info
@@ -156,13 +156,13 @@ REGISTER_SIMPLE_OPERATION(NEDepthwiseConvolutionOperation, NEON, OperationType::
bool run_3x3_opt = opt3x3 && weights->info()->dimension(0) == 3;
if(run_3x3_opt)
{
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolution>();
+ auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
depwthwise_conv->configure(in, weights, biases, out, conv_info);
func = std::move(depwthwise_conv);
}
else
{
- auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolution3x3>();
+ auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>();
depwthwise_conv->configure(in, weights, biases, out, conv_info);
func = std::move(depwthwise_conv);
}
@@ -313,7 +313,7 @@ REGISTER_SIMPLE_OPERATION(NEL2NormalizeLayerOperation, NEON, OperationType::L2No
const auto epsilon = ctx.parameter<float>("epsilon");
// Create and configure function
- auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEL2Normalize>();
+ auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEL2NormalizeLayer>();
l2_norm->configure(in, out, axis, epsilon);
// Log info
diff --git a/src/runtime/CL/functions/CLDepthConcatenate.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
index 89e44ca98e..05b5d54cf7 100644
--- a/src/runtime/CL/functions/CLDepthConcatenate.cpp
+++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/CL/functions/CLDepthConcatenate.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
@@ -33,7 +33,7 @@
using namespace arm_compute;
-CLDepthConcatenate::CLDepthConcatenate() // NOLINT
+CLDepthConcatenateLayer::CLDepthConcatenateLayer() // NOLINT
: _inputs_vector(),
_concat_kernels_vector(),
_border_handlers_vector(),
@@ -41,7 +41,7 @@ CLDepthConcatenate::CLDepthConcatenate() // NOLINT
{
}
-void CLDepthConcatenate::configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output) // NOLINT
+void CLDepthConcatenateLayer::configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output) // NOLINT
{
ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2);
@@ -49,7 +49,7 @@ void CLDepthConcatenate::configure(std::vector<ICLTensor *> inputs_vector, ICLTe
unsigned int depth_offset = 0;
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLDepthConcatenateKernel[]>(_num_inputs);
+ _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLDepthConcatenateLayerKernel[]>(_num_inputs);
_border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_inputs);
TensorShape output_shape = calculate_depth_concatenate_shape(inputs_vector);
@@ -66,7 +66,7 @@ void CLDepthConcatenate::configure(std::vector<ICLTensor *> inputs_vector, ICLTe
}
}
-void CLDepthConcatenate::run()
+void CLDepthConcatenateLayer::run()
{
cl::CommandQueue q = CLScheduler::get().queue();
diff --git a/src/runtime/CL/functions/CLDepthConvert.cpp b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
index b64d05b8b1..b448465909 100644
--- a/src/runtime/CL/functions/CLDepthConvert.cpp
+++ b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
@@ -21,18 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/CL/functions/CLDepthConvert.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
-#include "arm_compute/core/CL/kernels/CLDepthConvertKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "support/ToolchainSupport.h"
#include <utility>
using namespace arm_compute;
-void CLDepthConvert::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
+void CLDepthConvertLayer::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDepthConvertKernel>();
+ auto k = arm_compute::support::cpp14::make_unique<CLDepthConvertLayerKernel>();
k->configure(input, output, policy, shift);
_kernel = std::move(k);
}
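
For context, a minimal usage sketch of the renamed function (illustrative only, not part of the patch; it relies on the configure() signature shown above and assumes the CL runtime and both tensors are set up elsewhere):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"

// Widen a U8 tensor to U16 on the GPU; 'src' and 'dst' are assumed to be
// initialised and allocated elsewhere, with CLScheduler already initialised.
void convert_depth(arm_compute::CLTensor &src, arm_compute::CLTensor &dst)
{
    arm_compute::CLDepthConvertLayer convert; // previously arm_compute::CLDepthConvert
    convert.configure(&src, &dst, arm_compute::ConvertPolicy::SATURATE, 0 /* shift */);
    convert.run();
}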
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 81149508dd..02273fe08b 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/PixelValue.h"
@@ -30,12 +30,12 @@
using namespace arm_compute;
-CLDepthwiseConvolution3x3::CLDepthwiseConvolution3x3()
+CLDepthwiseConvolutionLayer3x3::CLDepthwiseConvolutionLayer3x3()
: _kernel(), _border_handler()
{
}
-void CLDepthwiseConvolution3x3::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
@@ -52,19 +52,19 @@ void CLDepthwiseConvolution3x3::configure(ICLTensor *input, const ICLTensor *wei
_border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, zero_value);
}
-void CLDepthwiseConvolution3x3::run()
+void CLDepthwiseConvolutionLayer3x3::run()
{
CLScheduler::get().enqueue(_border_handler);
CLScheduler::get().enqueue(_kernel);
}
-CLDepthwiseConvolution::CLDepthwiseConvolution()
+CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer()
: _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _v2mm_input_fill_border(), _v2mm_weights_fill_border(), _input_reshaped(), _weights_reshaped(),
_v2mm_output()
{
}
-void CLDepthwiseConvolution::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
@@ -124,7 +124,7 @@ void CLDepthwiseConvolution::configure(ICLTensor *input, const ICLTensor *weight
_v2mm_output.allocator()->allocate();
}
-void CLDepthwiseConvolution::run()
+void CLDepthwiseConvolutionLayer::run()
{
CLScheduler::get().enqueue(_im2col_kernel);
diff --git a/src/runtime/CL/functions/CLL2Normalize.cpp b/src/runtime/CL/functions/CLL2NormalizeLayer.cpp
index 99be8cae4c..d1bb65f1c9 100644
--- a/src/runtime/CL/functions/CLL2Normalize.cpp
+++ b/src/runtime/CL/functions/CLL2NormalizeLayer.cpp
@@ -21,10 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/CL/functions/CLL2Normalize.h"
+#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/kernels/CLL2NormalizeKernel.h"
+#include "arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/TensorInfo.h"
@@ -34,12 +34,12 @@
using namespace arm_compute;
-CLL2Normalize::CLL2Normalize(std::shared_ptr<IMemoryManager> memory_manager)
+CLL2NormalizeLayer::CLL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _reduce_func(), _normalize_kernel(), _sumsq()
{
}
-void CLL2Normalize::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, float epsilon)
+void CLL2NormalizeLayer::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, float epsilon)
{
// Manage intermediate buffers
_memory_group.manage(&_sumsq);
@@ -52,7 +52,7 @@ void CLL2Normalize::configure(ICLTensor *input, ICLTensor *output, unsigned int
_sumsq.allocator()->allocate();
}
-void CLL2Normalize::run()
+void CLL2NormalizeLayer::run()
{
_memory_group.acquire();
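
An illustrative sketch for the renamed L2 normalization function (not part of the patch; it assumes the configure() signature shown above and passes an explicit null memory manager, matching the constructor parameter above):

#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"

// L2-normalise 'input' along the X axis into 'output'; both tensors are
// assumed to be initialised, allocated and filled elsewhere.
void l2_normalize(arm_compute::CLTensor &input, arm_compute::CLTensor &output)
{
    arm_compute::CLL2NormalizeLayer l2_norm(nullptr); // previously arm_compute::CLL2Normalize
    l2_norm.configure(&input, &output, 0 /* axis: X */, 1e-12f);
    l2_norm.run();
}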
diff --git a/src/runtime/CL/functions/CLLaplacianPyramid.cpp b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
index a395487103..7e5278f380 100644
--- a/src/runtime/CL/functions/CLLaplacianPyramid.cpp
+++ b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
@@ -29,7 +29,7 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConvert.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
#include "arm_compute/runtime/CL/functions/CLGaussian5x5.h"
#include "arm_compute/runtime/CL/functions/CLGaussianPyramid.h"
#include "support/ToolchainSupport.h"
diff --git a/src/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.cpp b/src/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.cpp
index ed756cf261..ee0b121695 100755
--- a/src/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h"
@@ -32,14 +32,14 @@
using namespace arm_compute;
-GCDepthConcatenate::GCDepthConcatenate() //NOLINT
+GCDepthConcatenateLayer::GCDepthConcatenateLayer() //NOLINT
: _concat_kernels_vector(),
_border_handlers_vector(),
_num_inputs(0)
{
}
-void GCDepthConcatenate::configure(std::vector<IGCTensor *> inputs_vector, IGCTensor *output) //NOLINT
+void GCDepthConcatenateLayer::configure(std::vector<IGCTensor *> inputs_vector, IGCTensor *output) //NOLINT
{
ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2);
@@ -47,7 +47,7 @@ void GCDepthConcatenate::configure(std::vector<IGCTensor *> inputs_vector, IGCTe
unsigned int depth_offset = 0;
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<GCDepthConcatenateKernel[]>(_num_inputs);
+ _concat_kernels_vector = arm_compute::support::cpp14::make_unique<GCDepthConcatenateLayerKernel[]>(_num_inputs);
_border_handlers_vector = arm_compute::support::cpp14::make_unique<GCFillBorderKernel[]>(_num_inputs);
for(unsigned int i = 0; i < _num_inputs; i++)
@@ -59,7 +59,7 @@ void GCDepthConcatenate::configure(std::vector<IGCTensor *> inputs_vector, IGCTe
}
}
-void GCDepthConcatenate::run()
+void GCDepthConcatenateLayer::run()
{
for(unsigned i = 0; i < _num_inputs; i++)
{
diff --git a/src/runtime/NEON/functions/NEDepthConcatenate.cpp b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
index f8ad2abe61..437c9417ce 100644
--- a/src/runtime/NEON/functions/NEDepthConcatenate.cpp
+++ b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenate.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
@@ -33,7 +33,7 @@
using namespace arm_compute;
-NEDepthConcatenate::NEDepthConcatenate() // NOLINT
+NEDepthConcatenateLayer::NEDepthConcatenateLayer() // NOLINT
: _inputs_vector(),
_concat_kernels_vector(),
_border_handlers_vector(),
@@ -41,12 +41,12 @@ NEDepthConcatenate::NEDepthConcatenate() // NOLINT
{
}
-void NEDepthConcatenate::configure(std::vector<ITensor *> inputs_vector, ITensor *output) // NOLINT
+void NEDepthConcatenateLayer::configure(std::vector<ITensor *> inputs_vector, ITensor *output) // NOLINT
{
ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2);
_num_inputs = inputs_vector.size();
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<NEDepthConcatenateKernel[]>(_num_inputs);
+ _concat_kernels_vector = arm_compute::support::cpp14::make_unique<NEDepthConcatenateLayerKernel[]>(_num_inputs);
_border_handlers_vector = arm_compute::support::cpp14::make_unique<NEFillBorderKernel[]>(_num_inputs);
TensorShape output_shape = calculate_depth_concatenate_shape(inputs_vector);
@@ -64,7 +64,7 @@ void NEDepthConcatenate::configure(std::vector<ITensor *> inputs_vector, ITensor
}
}
-void NEDepthConcatenate::run()
+void NEDepthConcatenateLayer::run()
{
for(unsigned i = 0; i < _num_inputs; ++i)
{
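
A hedged usage sketch for the renamed NEON concatenation function (illustrative only; it relies on the configure(std::vector<ITensor *>, ITensor *) signature visible above, which asserts at least two inputs):

#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
#include "arm_compute/runtime/Tensor.h"

// Concatenate two feature maps along the depth (Z) dimension; the tensors are
// assumed to be initialised and allocated elsewhere with compatible shapes.
void concat_depth(arm_compute::Tensor &a, arm_compute::Tensor &b, arm_compute::Tensor &out)
{
    arm_compute::NEDepthConcatenateLayer concat; // previously NEDepthConcatenate
    concat.configure({ &a, &b }, &out);          // at least two inputs required
    concat.run();
}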
diff --git a/src/runtime/NEON/functions/NEDepthConvert.cpp b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
index 37857b6534..9a75404fcd 100644
--- a/src/runtime/NEON/functions/NEDepthConvert.cpp
+++ b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
@@ -21,18 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/NEON/functions/NEDepthConvert.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
-#include "arm_compute/core/NEON/kernels/NEDepthConvertKernel.h"
+#include "arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h"
#include "support/ToolchainSupport.h"
#include <utility>
using namespace arm_compute;
-void NEDepthConvert::configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
+void NEDepthConvertLayer::configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDepthConvertKernel>();
+ auto k = arm_compute::support::cpp14::make_unique<NEDepthConvertLayerKernel>();
k->configure(input, output, policy, shift);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolution.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index e12bc07464..b890c6f5d5 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolution.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
@@ -31,12 +31,12 @@
using namespace arm_compute;
-NEDepthwiseConvolution3x3::NEDepthwiseConvolution3x3()
+NEDepthwiseConvolutionLayer3x3::NEDepthwiseConvolutionLayer3x3()
: _kernel(), _bias_kernel(), _border_handler(), _has_bias(false)
{
}
-void NEDepthwiseConvolution3x3::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
+void NEDepthwiseConvolutionLayer3x3::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
@@ -51,7 +51,7 @@ void NEDepthwiseConvolution3x3::configure(ITensor *input, const ITensor *weights
}
}
-void NEDepthwiseConvolution3x3::run()
+void NEDepthwiseConvolutionLayer3x3::run()
{
NEScheduler::get().schedule(&_border_handler, Window::DimX);
NEScheduler::get().schedule(&_kernel, Window::DimX);
@@ -61,12 +61,12 @@ void NEDepthwiseConvolution3x3::run()
}
}
-NEDepthwiseConvolution::NEDepthwiseConvolution()
+NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer()
: _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _input_reshaped(), _weights_reshaped(), _v2mm_output()
{
}
-void NEDepthwiseConvolution::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
+void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
@@ -117,7 +117,7 @@ void NEDepthwiseConvolution::configure(ITensor *input, const ITensor *weights, c
_v2mm_output.allocator()->allocate();
}
-void NEDepthwiseConvolution::run()
+void NEDepthwiseConvolutionLayer::run()
{
NEScheduler::get().schedule(&_im2col_kernel, Window::DimX);
NEScheduler::get().schedule(&_weights_reshape_kernel, Window::DimX);
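
And a sketch for the renamed 3x3 depthwise convolution (illustrative only; it assumes the configure(input, weights, biases, output, conv_info) signature shown above, with F32 tensors prepared elsewhere):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

// 3x3 depthwise convolution with stride 1 and 1-pixel padding; all tensors
// (F32) are assumed to be initialised and allocated elsewhere.
void depthwise_3x3(arm_compute::Tensor &input, arm_compute::Tensor &weights,
                   arm_compute::Tensor &biases, arm_compute::Tensor &output)
{
    arm_compute::NEDepthwiseConvolutionLayer3x3 dwc; // previously NEDepthwiseConvolution3x3
    dwc.configure(&input, &weights, &biases, &output, arm_compute::PadStrideInfo(1, 1, 1, 1));
    dwc.run();
}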
diff --git a/src/runtime/NEON/functions/NEL2Normalize.cpp b/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
index 349a781b0b..fa62483146 100644
--- a/src/runtime/NEON/functions/NEL2Normalize.cpp
+++ b/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
@@ -21,19 +21,19 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/NEON/functions/NEL2Normalize.h"
+#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
using namespace arm_compute;
-NEL2Normalize::NEL2Normalize(std::shared_ptr<IMemoryManager> memory_manager)
+NEL2NormalizeLayer::NEL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _reduce_func(), _normalize_kernel(), _sumsq()
{
}
-void NEL2Normalize::configure(ITensor *input, ITensor *output, unsigned int axis, float epsilon)
+void NEL2NormalizeLayer::configure(ITensor *input, ITensor *output, unsigned int axis, float epsilon)
{
// Manage intermediate buffers
_memory_group.manage(&_sumsq);
@@ -46,7 +46,7 @@ void NEL2Normalize::configure(ITensor *input, ITensor *output, unsigned int axis
_sumsq.allocator()->allocate();
}
-void NEL2Normalize::run()
+void NEL2NormalizeLayer::run()
{
_memory_group.acquire();
diff --git a/src/runtime/NEON/functions/NELaplacianPyramid.cpp b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
index a680f1f11d..0e149d4176 100644
--- a/src/runtime/NEON/functions/NELaplacianPyramid.cpp
+++ b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
@@ -28,7 +28,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConvert.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGaussian5x5.h"
#include "arm_compute/runtime/NEON/functions/NEGaussianPyramid.h"
#include "arm_compute/runtime/Tensor.h"
diff --git a/tests/benchmark/CL/DepthwiseConvolution.cpp b/tests/benchmark/CL/DepthwiseConvolutionLayer.cpp
index 40412da6f9..be6fba0a90 100644
--- a/tests/benchmark/CL/DepthwiseConvolution.cpp
+++ b/tests/benchmark/CL/DepthwiseConvolutionLayer.cpp
@@ -25,10 +25,10 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
-#include "tests/benchmark/fixtures/DepthwiseConvolutionFixture.h"
-#include "tests/datasets/MobileNetDepthwiseConvolutionDataset.h"
+#include "tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h"
+#include "tests/datasets/MobileNetDepthwiseConvolutionLayerDataset.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "utils/TypePrinter.h"
@@ -37,13 +37,13 @@ namespace arm_compute
{
namespace test
{
-const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
-using CLDepthwiseConvolutionFixture = DepthwiseConvolutionFixture<CLTensor, CLDepthwiseConvolution, CLAccessor>;
+const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
+using CLDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerFixture<CLTensor, CLDepthwiseConvolutionLayer, CLAccessor>;
TEST_SUITE(CL)
-REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetDepthwiseConvolution, CLDepthwiseConvolutionFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(datasets::MobileNetDepthwiseConvolutionDataset(), data_types),
+REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetDepthwiseConvolutionLayer, CLDepthwiseConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(datasets::MobileNetDepthwiseConvolutionLayerDataset(), data_types),
framework::dataset::make("Batches", { 1 })));
TEST_SUITE_END()
diff --git a/tests/benchmark/CL/SYSTEM/MobileNet.cpp b/tests/benchmark/CL/SYSTEM/MobileNet.cpp
index c745a0acab..4712bc0c80 100644
--- a/tests/benchmark/CL/SYSTEM/MobileNet.cpp
+++ b/tests/benchmark/CL/SYSTEM/MobileNet.cpp
@@ -27,7 +27,7 @@
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
@@ -46,7 +46,7 @@ using CLMobileNetFixture = MobileNetFixture<CLTensor,
CLActivationLayer,
CLConvolutionLayer,
CLDirectConvolutionLayer,
- CLDepthwiseConvolution,
+ CLDepthwiseConvolutionLayer,
CLReshapeLayer,
CLPoolingLayer>;
diff --git a/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp b/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp
index 66be3231cf..851148a860 100644
--- a/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp
+++ b/tests/benchmark/CL/SYSTEM/MobileNetV1.cpp
@@ -28,7 +28,7 @@
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
@@ -49,7 +49,7 @@ using CLMobileNetV1_224_Fixture = MobileNetV1Fixture<CLTensor,
CLBatchNormalizationLayer,
CLConvolutionLayer,
CLDirectConvolutionLayer,
- CLDepthwiseConvolution3x3,
+ CLDepthwiseConvolutionLayer3x3,
CLReshapeLayer,
CLPoolingLayer,
CLSoftmaxLayer,
@@ -61,7 +61,7 @@ using CLMobileNetV1_128_Fixture = MobileNetV1Fixture<CLTensor,
CLBatchNormalizationLayer,
CLConvolutionLayer,
CLDirectConvolutionLayer,
- CLDepthwiseConvolution3x3,
+ CLDepthwiseConvolutionLayer3x3,
CLReshapeLayer,
CLPoolingLayer,
CLSoftmaxLayer,
diff --git a/tests/benchmark/fixtures/DepthwiseConvolutionFixture.h b/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
index 6de7bcadeb..9a49d5613a 100644
--- a/tests/benchmark/fixtures/DepthwiseConvolutionFixture.h
+++ b/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -36,7 +36,7 @@ namespace test
{
/** Fixture that can be used for NEON and CL */
template <typename TensorType, typename Function, typename Accessor>
-class DepthwiseConvolutionFixture : public framework::Fixture
+class DepthwiseConvolutionLayerFixture : public framework::Fixture
{
public:
template <typename...>
diff --git a/tests/benchmark/fixtures/MobileNetFixture.h b/tests/benchmark/fixtures/MobileNetFixture.h
index 6c1ee300c1..660205c7ec 100644
--- a/tests/benchmark/fixtures/MobileNetFixture.h
+++ b/tests/benchmark/fixtures/MobileNetFixture.h
@@ -38,7 +38,7 @@ template <typename TensorType,
typename ActivationLayerFunction,
typename ConvolutionLayerFunction,
typename DirectConvolutionLayerFunction,
- typename DepthwiseConvolutionFunction,
+ typename DepthwiseConvolutionLayerFunction,
typename ReshapeFunction,
typename PoolingLayerFunction>
class MobileNetFixture : public framework::Fixture
@@ -69,7 +69,7 @@ private:
ActivationLayerFunction,
ConvolutionLayerFunction,
DirectConvolutionLayerFunction,
- DepthwiseConvolutionFunction,
+ DepthwiseConvolutionLayerFunction,
ReshapeFunction,
PoolingLayerFunction>
network{};
diff --git a/tests/datasets/DepthwiseConvolutionDataset.h b/tests/datasets/DepthwiseConvolutionLayerDataset.h
index 2c8347fc8c..a2caba9b2c 100644
--- a/tests/datasets/DepthwiseConvolutionDataset.h
+++ b/tests/datasets/DepthwiseConvolutionLayerDataset.h
@@ -35,7 +35,7 @@ namespace test
{
namespace datasets
{
-class DepthwiseConvolutionDataset
+class DepthwiseConvolutionLayerDataset
{
public:
using type = std::tuple<TensorShape, TensorShape, TensorShape, TensorShape, PadStrideInfo>;
@@ -66,7 +66,7 @@ public:
return description.str();
}
- DepthwiseConvolutionDataset::type operator*() const
+ DepthwiseConvolutionLayerDataset::type operator*() const
{
return std::make_tuple(*_src_it, *_weights_it, *_biases_it, *_dst_it, *_infos_it);
}
@@ -110,8 +110,8 @@ public:
}
protected:
- DepthwiseConvolutionDataset() = default;
- DepthwiseConvolutionDataset(DepthwiseConvolutionDataset &&) = default;
+ DepthwiseConvolutionLayerDataset() = default;
+ DepthwiseConvolutionLayerDataset(DepthwiseConvolutionLayerDataset &&) = default;
private:
std::vector<TensorShape> _src_shapes{};
@@ -120,10 +120,10 @@ private:
std::vector<TensorShape> _dst_shapes{};
std::vector<PadStrideInfo> _infos{};
};
-class SmallDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
+class SmallDepthwiseConvolutionLayerDataset final : public DepthwiseConvolutionLayerDataset
{
public:
- SmallDepthwiseConvolutionDataset()
+ SmallDepthwiseConvolutionLayerDataset()
{
add_config(TensorShape(7U, 7U, 3U), TensorShape(3U, 3U, 3U), TensorShape(3U), TensorShape(5U, 5U, 3U), PadStrideInfo(1, 1, 0, 0));
add_config(TensorShape(23U, 27U, 5U), TensorShape(3U, 5U, 5U), TensorShape(5U), TensorShape(11U, 23U, 5U), PadStrideInfo(2, 1, 0, 0));
@@ -142,10 +142,10 @@ public:
}
};
-class LargeDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
+class LargeDepthwiseConvolutionLayerDataset final : public DepthwiseConvolutionLayerDataset
{
public:
- LargeDepthwiseConvolutionDataset()
+ LargeDepthwiseConvolutionLayerDataset()
{
add_config(TensorShape(233U, 277U, 55U), TensorShape(3U, 3U, 55U), TensorShape(55U), TensorShape(116U, 275U, 55U), PadStrideInfo(2, 1, 0, 0));
add_config(TensorShape(333U, 277U, 77U), TensorShape(3U, 3U, 77U), TensorShape(77U), TensorShape(111U, 138U, 77U), PadStrideInfo(3, 2, 1, 0));
@@ -156,10 +156,10 @@ public:
}
};
-class SmallDepthwiseConvolutionDataset3x3 final : public DepthwiseConvolutionDataset
+class SmallDepthwiseConvolutionLayerDataset3x3 final : public DepthwiseConvolutionLayerDataset
{
public:
- SmallDepthwiseConvolutionDataset3x3()
+ SmallDepthwiseConvolutionLayerDataset3x3()
{
add_config(TensorShape(7U, 7U, 3U, 2U), TensorShape(3U, 3U, 3U), TensorShape(3U), TensorShape(5U, 5U, 3U, 2U), PadStrideInfo(1, 1, 0, 0));
add_config(TensorShape(33U, 27U, 11U), TensorShape(3U, 3U, 11U), TensorShape(11U), TensorShape(11U, 14U, 11U), PadStrideInfo(3, 2, 1, 1));
@@ -168,10 +168,10 @@ public:
}
};
-class LargeDepthwiseConvolutionDataset3x3 final : public DepthwiseConvolutionDataset
+class LargeDepthwiseConvolutionLayerDataset3x3 final : public DepthwiseConvolutionLayerDataset
{
public:
- LargeDepthwiseConvolutionDataset3x3()
+ LargeDepthwiseConvolutionLayerDataset3x3()
{
add_config(TensorShape(233U, 277U, 55U, 3U), TensorShape(3U, 3U, 55U), TensorShape(55U), TensorShape(116U, 275U, 55U, 3U), PadStrideInfo(2, 1, 0, 0));
add_config(TensorShape(333U, 277U, 77U), TensorShape(3U, 3U, 77U), TensorShape(77U), TensorShape(111U, 138U, 77U), PadStrideInfo(3, 2, 1, 0));
diff --git a/tests/datasets/MobileNetDepthwiseConvolutionDataset.h b/tests/datasets/MobileNetDepthwiseConvolutionLayerDataset.h
index 918815f41e..5531a08d8e 100644
--- a/tests/datasets/MobileNetDepthwiseConvolutionDataset.h
+++ b/tests/datasets/MobileNetDepthwiseConvolutionLayerDataset.h
@@ -24,7 +24,7 @@
#ifndef ARM_COMPUTE_TEST_MOBILENET_DEPTHWISE_CONVOLUTION_DATASET
#define ARM_COMPUTE_TEST_MOBILENET_DEPTHWISE_CONVOLUTION_DATASET
-#include "tests/datasets/DepthwiseConvolutionDataset.h"
+#include "tests/datasets/DepthwiseConvolutionLayerDataset.h"
#include "utils/TypePrinter.h"
@@ -37,10 +37,10 @@ namespace test
{
namespace datasets
{
-class MobileNetDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
+class MobileNetDepthwiseConvolutionLayerDataset final : public DepthwiseConvolutionLayerDataset
{
public:
- MobileNetDepthwiseConvolutionDataset()
+ MobileNetDepthwiseConvolutionLayerDataset()
{
add_config(TensorShape(7U, 7U, 1024U), TensorShape(3U, 3U, 1024U), TensorShape(1024U), TensorShape(3U, 3U, 1024U), PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
add_config(TensorShape(14U, 14U, 512U), TensorShape(3U, 3U, 512U), TensorShape(512U), TensorShape(7U, 7U, 512U), PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 173ee74958..02a71aa7b5 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -269,11 +269,11 @@ public:
}
};
-/** Data set containing 2D tensor shapes for DepthConcatenate. */
-class DepthConcatenateShapes final : public ShapeDataset
+/** Data set containing 2D tensor shapes for DepthConcatenateLayer. */
+class DepthConcatenateLayerShapes final : public ShapeDataset
{
public:
- DepthConcatenateShapes()
+ DepthConcatenateLayerShapes()
: ShapeDataset("Shape",
{
TensorShape{ 322U, 243U },
diff --git a/tests/networks/MobileNetNetwork.h b/tests/networks/MobileNetNetwork.h
index 74dce0e348..1bc8ad9a0c 100644
--- a/tests/networks/MobileNetNetwork.h
+++ b/tests/networks/MobileNetNetwork.h
@@ -47,7 +47,7 @@ template <typename TensorType,
typename ActivationLayerFunction,
typename ConvolutionLayerFunction,
typename DirectConvolutionLayerFunction,
- typename DepthwiseConvolutionFunction,
+ typename DepthwiseConvolutionLayerFunction,
typename ReshapeFunction,
typename PoolingLayerFunction>
class MobileNetNetwork
@@ -279,9 +279,9 @@ private:
ConvolutionLayerFunction conv3x3{};
ActivationLayerFunction conv3x3_act{};
- std::array<ActivationLayerFunction, 26> act{ {} };
- std::array<DirectConvolutionLayerFunction, 14> conv1x1{ {} };
- std::array<DepthwiseConvolutionFunction, 13> dwc3x3{ {} };
+ std::array<ActivationLayerFunction, 26> act{ {} };
+ std::array<DirectConvolutionLayerFunction, 14> conv1x1{ {} };
+ std::array<DepthwiseConvolutionLayerFunction, 13> dwc3x3{ {} };
PoolingLayerFunction pool{};
ActivationLayerFunction logistic{};
ReshapeFunction reshape{};
diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp
index 19a8b369ce..02901371bb 100644
--- a/tests/validation/CL/DepthConcatenateLayer.cpp
+++ b/tests/validation/CL/DepthConcatenateLayer.cpp
@@ -24,7 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConcatenate.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
@@ -45,7 +45,7 @@ TEST_SUITE(DepthConcatenateLayer)
//TODO(COMPMID-415): Add configuration test?
template <typename T>
-using CLDepthConcatenateLayerFixture = DepthConcatenateValidationFixture<CLTensor, ICLTensor, CLAccessor, CLDepthConcatenate, T>;
+using CLDepthConcatenateLayerFixture = DepthConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLDepthConcatenateLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -70,7 +70,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<float>, framewor
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(), framework::dataset::make("DataType",
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
DataType::F32)))
{
// Validate output
@@ -88,7 +88,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int8_t>, framewo
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
framework::dataset::make("DataType",
DataType::QS8)))
{
@@ -105,7 +105,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int16_t>, framew
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
framework::dataset::make("DataType",
DataType::QS16)))
{
diff --git a/tests/validation/CL/DepthConvert.cpp b/tests/validation/CL/DepthConvertLayer.cpp
index 57669f0a52..9c6cc46ca8 100644
--- a/tests/validation/CL/DepthConvert.cpp
+++ b/tests/validation/CL/DepthConvertLayer.cpp
@@ -24,7 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConvert.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ConvertPolicyDataset.h"
@@ -33,7 +33,7 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DepthConvertFixture.h"
+#include "tests/validation/fixtures/DepthConvertLayerFixture.h"
namespace arm_compute
{
@@ -44,43 +44,43 @@ namespace validation
namespace
{
/** Input data sets **/
-const auto DepthConvertU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
-const auto DepthConvertU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
-const auto DepthConvertU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
-const auto DepthConvertS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8));
-const auto DepthConvertFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16));
-const auto DepthConvertShiftDataset = framework::dataset::make("Shift", 0, 7);
-const auto DepthConvertFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7);
+const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
+const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
+const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
+const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8));
+const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16));
+const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7);
+const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7);
} // namespace
TEST_SUITE(CL)
-TEST_SUITE(DepthConvert)
+TEST_SUITE(DepthConvertLayer)
template <typename T>
-using CLDepthConvertToU16Fixture = DepthConvertValidationFixture<CLTensor, CLAccessor, CLDepthConvert, T, uint16_t>;
+using CLDepthConvertLayerToU16Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint16_t>;
template <typename T>
-using CLDepthConvertToS16Fixture = DepthConvertValidationFixture<CLTensor, CLAccessor, CLDepthConvert, T, int16_t>;
+using CLDepthConvertLayerToS16Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int16_t>;
template <typename T>
-using CLDepthConvertToS32Fixture = DepthConvertValidationFixture<CLTensor, CLAccessor, CLDepthConvert, T, int32_t>;
+using CLDepthConvertLayerToS32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int32_t>;
template <typename T>
-using CLDepthConvertToU8Fixture = DepthConvertValidationFixture<CLTensor, CLAccessor, CLDepthConvert, T, uint8_t>;
+using CLDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint8_t>;
template <typename T>
-using CLDepthConvertToU32Fixture = DepthConvertValidationFixture<CLTensor, CLAccessor, CLDepthConvert, T, uint32_t>;
+using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint32_t>;
template <typename T>
-using CLDepthConvertToFP32FixedPointFixture = DepthConvertValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvert, T, float>;
+using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, float>;
template <typename T>
-using CLDepthConvertToQS8FixedPointFixture = DepthConvertValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvert, T, int8_t>;
+using CLDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int8_t>;
template <typename T>
-using CLDepthConvertToQS16FixedPointFixture = DepthConvertValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvert, T, int16_t>;
+using CLDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int16_t>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -90,7 +90,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -103,17 +103,17 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU8toU16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toU16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToU16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU8toU16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toU16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -122,7 +122,7 @@ TEST_SUITE_END()
TEST_SUITE(U8_to_S16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -132,7 +132,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -145,17 +145,17 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU8toS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToS16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU8toS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -163,7 +163,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToS16Fixture<uint8_t>, framework:
TEST_SUITE_END()
TEST_SUITE(U8_to_S32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -173,7 +173,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -186,17 +186,17 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU8toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToS32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU8toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -205,7 +205,7 @@ TEST_SUITE_END()
TEST_SUITE(U16_to_U8)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -215,7 +215,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -228,16 +228,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToU8Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -246,7 +246,7 @@ TEST_SUITE_END()
TEST_SUITE(U16_to_U32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -256,7 +256,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -269,16 +269,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU16toU32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToU32Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU16toU32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -287,7 +287,7 @@ TEST_SUITE_END()
TEST_SUITE(S16_to_U8)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -297,7 +297,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -310,16 +310,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertS16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToU8Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertS16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -328,7 +328,7 @@ TEST_SUITE_END()
TEST_SUITE(S16_to_S32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -338,7 +338,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -351,16 +351,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertS16toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertToS32Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertS16toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -370,7 +370,7 @@ TEST_SUITE_END()
TEST_SUITE(Quantized_to_FP32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset),
+ DepthConvertLayerFixedPointQuantizedDataset),
shape, dt, policy, fixed_point_position)
{
int shift = 0;
@@ -380,7 +380,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
CLTensor dst = create_tensor<CLTensor>(shape, DataType::F32, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -392,30 +392,34 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
validate(src.info()->padding(), padding);
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertQS8toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerQS8toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertQS16toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerQS16toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS8, CLDepthConvertToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertQS8toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS8, CLDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerQS8toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS16, CLDepthConvertToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertQS16toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS16, CLDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerQS16toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -425,7 +429,7 @@ TEST_SUITE_END()
TEST_SUITE(FP32_to_Quantized)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset),
+ DepthConvertLayerFixedPointQuantizedDataset),
shape, dt, policy, fixed_point_position)
{
int shift = 0;
@@ -435,7 +439,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
// Create and Configure function
- CLDepthConvert depth_convert;
+ CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -447,30 +451,34 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
validate(src.info()->padding(), padding);
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertFP32toQS8Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerFP32toQS8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertFP32toQS16Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerFP32toQS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS8, CLDepthConvertToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertFP32toQS8Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS8, CLDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerFP32toQS8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS16, CLDepthConvertToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertFP32toQS16Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS16, CLDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerFP32toQS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
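Outside the patch itself, a minimal usage sketch of the renamed CLDepthConvertLayer may help: it reuses the configure(&src, &dst, policy, shift) call exercised by the tests above, while the tensor shape, the CLScheduler setup and the main() wrapper are illustrative assumptions, not part of this change.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"

using namespace arm_compute;

int main()
{
    // Illustrative default OpenCL context/queue setup for CL functions
    CLScheduler::get().default_init();

    // U8 -> U16 promotion, mirroring the U8_to_U16 suite above (16x16 shape is an assumption)
    CLTensor src;
    CLTensor dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::U8));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::U16));

    // Create and configure the renamed function: saturating policy, no shift
    CLDepthConvertLayer depth_convert;
    depth_convert.configure(&src, &dst, ConvertPolicy::SATURATE, 0);

    // Allocate backing CL buffers, run, and wait for completion
    src.allocator()->allocate();
    dst.allocator()->allocate();
    depth_convert.run();
    CLScheduler::get().sync();

    return 0;
}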
diff --git a/tests/validation/CL/DepthwiseConvolution.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
index ccd9c36561..92a2773e54 100644
--- a/tests/validation/CL/DepthwiseConvolution.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
@@ -24,15 +24,15 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
+#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
-#include "tests/datasets/DepthwiseConvolutionDataset.h"
+#include "tests/datasets/DepthwiseConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DepthwiseConvolutionFixture.h"
+#include "tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h"
namespace arm_compute
{
@@ -50,34 +50,37 @@ TEST_SUITE(CL)
TEST_SUITE(DepthwiseConvolutionLayer)
template <typename T>
-using CLDepthwiseConvolutionFixture = DepthwiseConvolutionValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolution, T>;
+using CLDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T>;
TEST_SUITE(Generic)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionDataset(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionLayerDataset(), framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionDataset(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionLayerDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
TEST_SUITE_END()
template <typename T>
-using CLDepthwiseConvolutionFixture3x3 = DepthwiseConvolutionValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolution3x3, T>;
+using CLDepthwiseConvolutionLayerFixture3x3 = DepthwiseConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer3x3, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionFixture3x3<float>, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionDataset3x3(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionFixture3x3<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionDataset3x3(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerFixture3x3<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
@@ -86,18 +89,18 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLDepthwiseConvolutionQuantizedFixture3x3 = DepthwiseConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDepthwiseConvolution3x3, T>;
+using CLDepthwiseConvolutionLayerQuantizedFixture3x3 = DepthwiseConvolutionLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer3x3, T>;
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDepthwiseConvolutionDataset3x3(),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) })))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDepthwiseConvolutionDataset3x3(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 127) })))
{
diff --git a/tests/validation/CL/L2Normalize.cpp b/tests/validation/CL/L2NormalizeLayer.cpp
index 4b0820c211..bc2374bc68 100644
--- a/tests/validation/CL/L2Normalize.cpp
+++ b/tests/validation/CL/L2NormalizeLayer.cpp
@@ -24,7 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLL2Normalize.h"
+#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
@@ -32,7 +32,7 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/L2NormalizeFixture.h"
+#include "tests/validation/fixtures/L2NormalizeLayerFixture.h"
namespace arm_compute
{
@@ -48,20 +48,20 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
} // namespace
TEST_SUITE(CL)
-TEST_SUITE(L2Normalize)
+TEST_SUITE(L2NormalizeLayer)
template <typename T>
-using CLL2NormalizeFixture = L2NormalizeValidationFixture<CLTensor, CLAccessor, CLL2Normalize, T>;
+using CLL2NormalizeLayerFixture = L2NormalizeLayerValidationFixture<CLTensor, CLAccessor, CLL2NormalizeLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLL2NormalizeFixture<float>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE(RunSmall, CLL2NormalizeLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLL2NormalizeFixture<float>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE(RunLarge, CLL2NormalizeLayerFixture<float>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
{
// Validate output
diff --git a/tests/validation/CPP/DepthConvert.cpp b/tests/validation/CPP/DepthConvertLayer.cpp
index 110174a73f..dd095b8912 100644
--- a/tests/validation/CPP/DepthConvert.cpp
+++ b/tests/validation/CPP/DepthConvertLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "DepthConvert.h"
+#include "DepthConvertLayer.h"
#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
diff --git a/tests/validation/CPP/DepthConvert.h b/tests/validation/CPP/DepthConvertLayer.h
index 1446bfda5b..1446bfda5b 100644
--- a/tests/validation/CPP/DepthConvert.h
+++ b/tests/validation/CPP/DepthConvertLayer.h
diff --git a/tests/validation/CPP/DepthwiseConvolution.cpp b/tests/validation/CPP/DepthwiseConvolutionLayer.cpp
index 229e044783..99baa4b3c7 100644
--- a/tests/validation/CPP/DepthwiseConvolution.cpp
+++ b/tests/validation/CPP/DepthwiseConvolutionLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "DepthwiseConvolution.h"
+#include "DepthwiseConvolutionLayer.h"
#include "ConvolutionLayer.h"
#include "Utils.h"
diff --git a/tests/validation/CPP/DepthwiseConvolution.h b/tests/validation/CPP/DepthwiseConvolutionLayer.h
index df743a5b8e..df743a5b8e 100644
--- a/tests/validation/CPP/DepthwiseConvolution.h
+++ b/tests/validation/CPP/DepthwiseConvolutionLayer.h
diff --git a/tests/validation/CPP/DepthwiseSeparableConvolutionLayer.cpp b/tests/validation/CPP/DepthwiseSeparableConvolutionLayer.cpp
index 8c8e50d349..ca6c168114 100644
--- a/tests/validation/CPP/DepthwiseSeparableConvolutionLayer.cpp
+++ b/tests/validation/CPP/DepthwiseSeparableConvolutionLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "DepthwiseConvolution.h"
+#include "DepthwiseConvolutionLayer.h"
#include "DepthwiseSeparableConvolutionLayer.h"
diff --git a/tests/validation/CPP/L2Normalize.cpp b/tests/validation/CPP/L2NormalizeLayer.cpp
index 4fb4d57eb4..99f4e8a6e6 100644
--- a/tests/validation/CPP/L2Normalize.cpp
+++ b/tests/validation/CPP/L2NormalizeLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "L2Normalize.h"
+#include "L2NormalizeLayer.h"
#include "ReductionOperation.h"
#include "tests/validation/Helpers.h"
diff --git a/tests/validation/CPP/L2Normalize.h b/tests/validation/CPP/L2NormalizeLayer.h
index 1db3ae6174..1db3ae6174 100644
--- a/tests/validation/CPP/L2Normalize.h
+++ b/tests/validation/CPP/L2NormalizeLayer.h
diff --git a/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp b/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp
index 829845dd36..7af3050c1d 100644
--- a/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp
@@ -24,7 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h"
-#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenate.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h"
#include "tests/GLES_COMPUTE/GCAccessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
@@ -45,7 +45,7 @@ TEST_SUITE(DepthConcatenateLayer)
//TODO(COMPMID-415): Add configuration test?
template <typename T>
-using GCDepthConcatenateLayerFixture = DepthConcatenateValidationFixture<GCTensor, IGCTensor, GCAccessor, GCDepthConcatenate, T>;
+using GCDepthConcatenateLayerFixture = DepthConcatenateLayerValidationFixture<GCTensor, IGCTensor, GCAccessor, GCDepthConcatenateLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -70,7 +70,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, GCDepthConcatenateLayerFixture<float>, framewor
// Validate output
validate(GCAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(), framework::dataset::make("DataType",
+FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
DataType::F32)))
{
// Validate output
diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp
index 9a0a34f8f8..7e99ab5dc7 100644
--- a/tests/validation/NEON/DepthConcatenateLayer.cpp
+++ b/tests/validation/NEON/DepthConcatenateLayer.cpp
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenate.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
@@ -45,7 +45,7 @@ TEST_SUITE(DepthConcatenateLayer)
//TODO(COMPMID-415): Add configuration test?
template <typename T>
-using NEDepthConcatenateLayerFixture = DepthConcatenateValidationFixture<Tensor, ITensor, Accessor, NEDepthConcatenate, T>;
+using NEDepthConcatenateLayerFixture = DepthConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEDepthConcatenateLayer, T>;
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -56,7 +56,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<half>, framework
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(), framework::dataset::make("DataType",
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
DataType::F16)))
{
// Validate output
@@ -72,7 +72,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<float>, framewor
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(), framework::dataset::make("DataType",
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
DataType::F32)))
{
// Validate output
@@ -90,7 +90,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int8_t>, framewo
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
framework::dataset::make("DataType",
DataType::QS8)))
{
@@ -107,7 +107,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int16_t>, framew
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateShapes(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
framework::dataset::make("DataType",
DataType::QS16)))
{
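On the NEON side the same rename applies (NEDepthConcatenate becomes NEDepthConcatenateLayer). A minimal sketch follows; the vector-of-inputs configure interface and the shapes are assumptions based on the validation fixture above, not something stated by this patch.

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"

#include <vector>

using namespace arm_compute;

int main()
{
    // Two F32 inputs concatenated along the depth (Z) dimension; shapes are illustrative
    Tensor in0;
    Tensor in1;
    Tensor out;
    in0.allocator()->init(TensorInfo(TensorShape(16U, 16U, 2U), 1, DataType::F32));
    in1.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(16U, 16U, 5U), 1, DataType::F32));

    // Assumed interface: a vector of input tensors and a single output tensor
    std::vector<ITensor *> inputs = { &in0, &in1 };
    NEDepthConcatenateLayer concat;
    concat.configure(inputs, &out);

    in0.allocator()->allocate();
    in1.allocator()->allocate();
    out.allocator()->allocate();

    concat.run();
    return 0;
}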
diff --git a/tests/validation/NEON/DepthConvert.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index e036cc45d1..a56298babc 100644
--- a/tests/validation/NEON/DepthConvert.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConvert.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
@@ -33,7 +33,7 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DepthConvertFixture.h"
+#include "tests/validation/fixtures/DepthConvertLayerFixture.h"
namespace arm_compute
{
@@ -44,43 +44,43 @@ namespace validation
namespace
{
/** Input data sets **/
-const auto DepthConvertU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
-const auto DepthConvertU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
-const auto DepthConvertU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
-const auto DepthConvertS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8));
-const auto DepthConvertFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16));
-const auto DepthConvertShiftDataset = framework::dataset::make("Shift", 0, 7);
-const auto DepthConvertFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7);
+const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
+const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
+const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
+const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8));
+const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16));
+const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7);
+const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7);
} // namespace
TEST_SUITE(NEON)
-TEST_SUITE(DepthConvert)
+TEST_SUITE(DepthConvertLayer)
template <typename T>
-using NEDepthConvertToU16Fixture = DepthConvertValidationFixture<Tensor, Accessor, NEDepthConvert, T, uint16_t>;
+using NEDepthConvertLayerToU16Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint16_t>;
template <typename T>
-using NEDepthConvertToS16Fixture = DepthConvertValidationFixture<Tensor, Accessor, NEDepthConvert, T, int16_t>;
+using NEDepthConvertLayerToS16Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, int16_t>;
template <typename T>
-using NEDepthConvertToS32Fixture = DepthConvertValidationFixture<Tensor, Accessor, NEDepthConvert, T, int32_t>;
+using NEDepthConvertLayerToS32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, int32_t>;
template <typename T>
-using NEDepthConvertToU8Fixture = DepthConvertValidationFixture<Tensor, Accessor, NEDepthConvert, T, uint8_t>;
+using NEDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint8_t>;
template <typename T>
-using NEDepthConvertToU32Fixture = DepthConvertValidationFixture<Tensor, Accessor, NEDepthConvert, T, uint32_t>;
+using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint32_t>;
template <typename T>
-using NEDepthConvertToFP32FixedPointFixture = DepthConvertValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvert, T, float>;
+using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, float>;
template <typename T>
-using NEDepthConvertToQS8FixedPointFixture = DepthConvertValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvert, T, int8_t>;
+using NEDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, int8_t>;
template <typename T>
-using NEDepthConvertToQS16FixedPointFixture = DepthConvertValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvert, T, int16_t>;
+using NEDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, int16_t>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -90,7 +90,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -103,17 +103,17 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU8toU16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toU16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToU16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU8toU16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toU16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -122,7 +122,7 @@ TEST_SUITE_END()
TEST_SUITE(U8_to_S16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -132,7 +132,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -145,17 +145,17 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU8toS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToS16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU8toS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -163,7 +163,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToS16Fixture<uint8_t>, framework:
TEST_SUITE_END()
TEST_SUITE(U8_to_S32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -173,7 +173,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -186,17 +186,17 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU8toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToS32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU8toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU8toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -205,7 +205,7 @@ TEST_SUITE_END()
TEST_SUITE(U16_to_U8)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -215,7 +215,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -228,16 +228,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToU8Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -246,7 +246,7 @@ TEST_SUITE_END()
TEST_SUITE(U16_to_U32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -256,7 +256,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -269,16 +269,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertU16toU32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToU32Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertU16toU32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerU16toU32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -287,7 +287,7 @@ TEST_SUITE_END()
TEST_SUITE(S16_to_U8)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -297,7 +297,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -310,16 +310,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertS16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToU8Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertS16toU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -328,7 +328,7 @@ TEST_SUITE_END()
TEST_SUITE(S16_to_S32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset),
+ DepthConvertLayerShiftDataset),
shape, policy, shift)
{
int fixed_point_position = 0;
@@ -338,7 +338,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -351,16 +351,16 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertS16toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertToS32Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertS16toS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertShiftDataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS16toS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ DepthConvertLayerShiftDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -370,7 +370,7 @@ TEST_SUITE_END()
TEST_SUITE(Quantized_to_FP32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset),
+ DepthConvertLayerFixedPointQuantizedDataset),
shape, dt, policy, fixed_point_position)
{
int shift = 0;
@@ -380,7 +380,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -392,30 +392,34 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
validate(src.info()->padding(), padding);
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertQS8toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerQS8toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertQS16toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerQS16toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS8, NEDepthConvertToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertQS8toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS8, NEDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerQS8toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS16, NEDepthConvertToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertQS16toFP32Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS16, NEDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerQS16toFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -425,7 +429,7 @@ TEST_SUITE_END()
TEST_SUITE(FP32_to_Quantized)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset),
+ DepthConvertLayerFixedPointQuantizedDataset),
shape, dt, policy, fixed_point_position)
{
int shift = 0;
@@ -435,7 +439,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
// Create and Configure function
- NEDepthConvert depth_convert;
+ NEDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
// Validate valid region
@@ -447,30 +451,34 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
validate(src.info()->padding(), padding);
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertFP32toQS8Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerFP32toQS8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertFP32toQS16Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(),
+ DepthConvertLayerFP32toQS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS8, NEDepthConvertToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertFP32toQS8Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS8, NEDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerFP32toQS8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeQS16, NEDepthConvertToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertFP32toQS16Dataset),
+FIXTURE_DATA_TEST_CASE(RunLargeQS16, NEDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(),
+ DepthConvertLayerFP32toQS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertFixedPointQuantizedDataset))
+ DepthConvertLayerFixedPointQuantizedDataset))
{
// Validate output
validate(Accessor(_target), _reference);
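For reference, a minimal sketch of driving the renamed NEDepthConvertLayer function outside the test framework, mirroring the configure(&src, &dst, policy, shift) call exercised by the suites above; the tensor initialisation and the U8->S16 type pair are simplified assumptions taken from the U8_to_S16 suite, not part of this patch:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_depth_convert_u8_to_s16(const TensorShape &shape)
{
    // Source/destination tensors; U8 -> S16 matches the U8_to_S16 suite above.
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(shape, 1, DataType::U8));
    dst.allocator()->init(TensorInfo(shape, 1, DataType::S16));

    // Renamed interface: NEDepthConvert -> NEDepthConvertLayer.
    NEDepthConvertLayer depth_convert;
    depth_convert.configure(&src, &dst, ConvertPolicy::SATURATE, 0 /* shift */);

    // Allocate backing memory after configuration, then execute.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    depth_convert.run();
}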
diff --git a/tests/validation/NEON/DepthwiseConvolution.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
index 3a4b7aa2e9..17eaaf8ad7 100644
--- a/tests/validation/NEON/DepthwiseConvolution.cpp
+++ b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
@@ -22,17 +22,17 @@
* SOFTWARE.
*/
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolution.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
-#include "tests/datasets/DepthwiseConvolutionDataset.h"
+#include "tests/datasets/DepthwiseConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DepthwiseConvolutionFixture.h"
+#include "tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h"
namespace arm_compute
{
@@ -48,7 +48,8 @@ constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value fo
TEST_SUITE(NEON)
TEST_SUITE(DepthwiseConvolutionLayer)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallDepthwiseConvolutionDataset3x3(), datasets::LargeDepthwiseConvolutionDataset3x3()),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ datasets::LargeDepthwiseConvolutionLayerDataset3x3()),
framework::dataset::make("DataType", DataType::F32)),
input_shape, weights_shape, bias_shape, output_shape, info, data_type)
{
@@ -64,7 +65,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
- NEDepthwiseConvolution3x3 depthwise_layer;
+ NEDepthwiseConvolutionLayer3x3 depthwise_layer;
depthwise_layer.configure(&src, &weights, &bias, &dst, info);
// Validate valid region
@@ -88,14 +89,16 @@ TEST_SUITE(Float)
TEST_SUITE(F32)
TEST_SUITE(Generic)
template <typename T>
-using NEDepthwiseConvolutionFixture = DepthwiseConvolutionValidationFixture<Tensor, Accessor, NEDepthwiseConvolution, T>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallDepthwiseConvolutionDataset(), framework::dataset::make("DataType",
- DataType::F32)))
+using NEDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerValidationFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, T>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionDataset(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionLayerDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(Accessor(_target), _reference, tolerance_f32);
}
@@ -103,14 +106,16 @@ TEST_SUITE_END()
TEST_SUITE(W3x3)
template <typename T>
-using NEDepthwiseConvolutionFixture3x3 = DepthwiseConvolutionValidationFixture<Tensor, Accessor, NEDepthwiseConvolution3x3, T>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionFixture3x3<float>, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionDataset3x3(), framework::dataset::make("DataType",
- DataType::F32)))
+using NEDepthwiseConvolutionLayerFixture3x3 = DepthwiseConvolutionLayerValidationFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer3x3, T>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionFixture3x3<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionDataset3x3(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerFixture3x3<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
{
validate(Accessor(_target), _reference, tolerance_f32);
}
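Similarly, a minimal sketch of the renamed NEDepthwiseConvolutionLayer3x3 in isolation, mirroring the five-argument configure call in the Configuration test above; the F32 data type and the way shapes are obtained are simplified assumptions for illustration:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_depthwise_3x3(const TensorShape &input_shape, const TensorShape &weights_shape,
                       const TensorShape &bias_shape, const TensorShape &output_shape,
                       const PadStrideInfo &info)
{
    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
    weights.allocator()->init(TensorInfo(weights_shape, 1, DataType::F32));
    bias.allocator()->init(TensorInfo(bias_shape, 1, DataType::F32));
    dst.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));

    // Renamed interface: NEDepthwiseConvolution3x3 -> NEDepthwiseConvolutionLayer3x3.
    NEDepthwiseConvolutionLayer3x3 depthwise_layer;
    depthwise_layer.configure(&src, &weights, &bias, &dst, info);

    // Allocate backing memory after configuration, then execute.
    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();
    depthwise_layer.run();
}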
diff --git a/tests/validation/NEON/L2Normalize.cpp b/tests/validation/NEON/L2NormalizeLayer.cpp
index ceffa6d510..c0f5920964 100644
--- a/tests/validation/NEON/L2Normalize.cpp
+++ b/tests/validation/NEON/L2NormalizeLayer.cpp
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEL2Normalize.h"
+#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
@@ -32,7 +32,7 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/L2NormalizeFixture.h"
+#include "tests/validation/fixtures/L2NormalizeLayerFixture.h"
namespace arm_compute
{
@@ -47,20 +47,20 @@ RelativeTolerance<float> tolerance_f32(0.00001f);
} // namespace
TEST_SUITE(NEON)
-TEST_SUITE(L2Normalize)
+TEST_SUITE(L2NormalizeLayer)
template <typename T>
-using NEL2NormalizeFixture = L2NormalizeValidationFixture<Tensor, Accessor, NEL2Normalize, T>;
+using NEL2NormalizeLayerFixture = L2NormalizeLayerValidationFixture<Tensor, Accessor, NEL2NormalizeLayer, T>;
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEL2NormalizeFixture<float>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE(RunSmall, NEL2NormalizeLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEL2NormalizeFixture<float>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE(RunLarge, NEL2NormalizeLayerFixture<float>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
{
// Validate output
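And a sketch for the renamed NEL2NormalizeLayer; the (input, output, axis, epsilon) configure overload is not shown in this patch and is assumed here from the "Axis" and "Epsilon" dataset parameters used by the fixtures above:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_l2_normalize(const TensorShape &shape)
{
    Tensor src, dst;
    src.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    dst.allocator()->init(TensorInfo(shape, 1, DataType::F32));

    // Renamed interface: NEL2Normalize -> NEL2NormalizeLayer.
    // Axis 0 and epsilon 1e-12 mirror the dataset values in the tests above.
    NEL2NormalizeLayer l2_norm;
    l2_norm.configure(&src, &dst, 0, 1e-12f);

    // Allocate backing memory after configuration, then execute.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    l2_norm.run();
}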
diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
index 633dba23e0..103c73e4ea 100644
--- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
@@ -43,7 +43,7 @@ namespace test
namespace validation
{
template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
-class DepthConcatenateValidationFixture : public framework::Fixture
+class DepthConcatenateLayerValidationFixture : public framework::Fixture
{
public:
template <typename...>
diff --git a/tests/validation/fixtures/DepthConvertFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h
index b132a9341d..c2fdc75bb4 100644
--- a/tests/validation/fixtures/DepthConvertFixture.h
+++ b/tests/validation/fixtures/DepthConvertLayerFixture.h
@@ -31,7 +31,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
-#include "tests/validation/CPP/DepthConvert.h"
+#include "tests/validation/CPP/DepthConvertLayer.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -41,7 +41,7 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class DepthConvertValidationFixedPointFixture : public framework::Fixture
+class DepthConvertLayerValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
@@ -106,23 +106,23 @@ protected:
int _shift{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class DepthConvertValidationFixture : public DepthConvertValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>
+class DepthConvertLayerValidationFixture : public DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
- DepthConvertValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift, 0);
+ DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift, 0);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class DepthConvertValidationFractionalBitsFixture : public DepthConvertValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>
+class DepthConvertLayerValidationFractionalBitsFixture : public DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t fractional_bits)
{
- DepthConvertValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0, fractional_bits);
+ DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0, fractional_bits);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/DepthwiseConvolutionFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index b1d31d657a..0af3fdf6c4 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -31,7 +31,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
-#include "tests/validation/CPP/DepthwiseConvolution.h"
+#include "tests/validation/CPP/DepthwiseConvolutionLayer.h"
#include "tests/validation/Helpers.h"
#include <random>
@@ -43,7 +43,7 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DepthwiseConvolutionValidationGenericFixture : public framework::Fixture
+class DepthwiseConvolutionLayerValidationGenericFixture : public framework::Fixture
{
public:
using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
@@ -151,26 +151,26 @@ protected:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DepthwiseConvolutionValidationFixture : public DepthwiseConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape in_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape out_shape, PadStrideInfo pad_stride_info, DataType data_type)
{
- DepthwiseConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info,
- data_type, QuantizationInfo());
+ DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info,
+ data_type, QuantizationInfo());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DepthwiseConvolutionValidationQuantizedFixture : public DepthwiseConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape in_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape out_shape, PadStrideInfo pad_stride_info, DataType data_type, QuantizationInfo quantization_info)
{
- DepthwiseConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info,
- data_type, quantization_info);
+ DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info,
+ data_type, quantization_info);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/L2NormalizeFixture.h b/tests/validation/fixtures/L2NormalizeLayerFixture.h
index e6113937f1..7bb95883f7 100644
--- a/tests/validation/fixtures/L2NormalizeFixture.h
+++ b/tests/validation/fixtures/L2NormalizeLayerFixture.h
@@ -32,7 +32,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
-#include "tests/validation/CPP/L2Normalize.h"
+#include "tests/validation/CPP/L2NormalizeLayer.h"
namespace arm_compute
{
@@ -41,7 +41,7 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class L2NormalizeValidationFixture : public framework::Fixture
+class L2NormalizeLayerValidationFixture : public framework::Fixture
{
public:
template <typename...>