-rw-r--r--  arm_compute/core/NEON/NEKernels.h | 2
-rw-r--r--  arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h (renamed from arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h) | 6
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h | 2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvolutionLayer.h | 8
-rw-r--r--  arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h (renamed from arm_compute/runtime/NEON/functions/NEWinogradLayer.h) | 14
-rw-r--r--  docs/00_introduction.dox | 9
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp (renamed from src/core/NEON/kernels/NEWinogradLayerKernel.cpp) | 2
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp | 4
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp | 2
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp (renamed from src/runtime/NEON/functions/NEWinogradLayer.cpp) | 14
-rw-r--r--  tests/benchmark/CL/ConvolutionLayer.cpp | 4
-rw-r--r--  tests/benchmark/NEON/ConvolutionLayer.cpp | 22
-rw-r--r--  tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h (renamed from tests/benchmark/fixtures/WinogradLayerFixture.h) | 8
-rw-r--r--  tests/validation/CL/Winograd.cpp | 2
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp | 8
-rw-r--r--  tests/validation/fixtures/WinogradConvolutionLayerFixture.h (renamed from tests/validation/fixtures/WinogradLayerFixture.h) | 2
17 files changed, 57 insertions(+), 56 deletions(-)
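
This commit renames the NEON Winograd-based convolution function from NEWinogradLayer to NEWinogradConvolutionLayer (and its kernel, header guards, fixtures and graph backend references accordingly); it is a straight rename with no functional change. For reference only, below is a minimal usage sketch of the renamed class, not part of the commit, assuming the v18.03 API visible in this diff (configure(input, weights, biases, output, conv_info, act_info)); the tensor shapes and pad/stride values are illustrative.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative FP32 NCHW tensors for a 3x3, stride-1 convolution,
        // a typical case for the Winograd path.
        Tensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 64U, 64U), 1, DataType::F32));
        biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));

        NEWinogradConvolutionLayer conv; // formerly NEWinogradLayer
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

        // Allocate backing memory after configure, as usual for arm_compute functions.
        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        conv.run();
        return 0;
    }
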
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 31f4881ef5..0e271efa49 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -111,6 +111,6 @@
#include "arm_compute/core/NEON/kernels/NETransposeKernel.h"
#include "arm_compute/core/NEON/kernels/NEWarpKernel.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
-#include "arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#endif /* __ARM_COMPUTE_NEKERNELS_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
index 7284f9fdc4..9912076cd5 100644
--- a/arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__
-#define __ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__
+#ifndef __ARM_COMPUTE_NEGEMMWINOGRADCONVOLUTIONLAYERKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMWINOGRADCONVOLUTIONLAYERKERNEL_H__
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/convolution/common/convolution.hpp"
@@ -550,4 +550,4 @@ private:
};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_NEGEMMWINOGRADLAYERKERNEL_H__*/
+#endif /*__ARM_COMPUTE_NEGEMMWINOGRADCONVOLUTIONLAYERKERNEL_H__*/
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 1531377e2e..8091acd258 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -109,6 +109,6 @@
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
#include "arm_compute/runtime/NEON/functions/NEWarpAffine.h"
#include "arm_compute/runtime/NEON/functions/NEWarpPerspective.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#endif /* __ARM_COMPUTE_NEFUNCTIONS_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index 220d1cb249..ce9a3ed4f2 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -30,7 +30,7 @@
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include <memory>
namespace arm_compute
@@ -38,9 +38,9 @@ namespace arm_compute
class ITensor;
/** Basic function to simulate a convolution layer. This function calls one of the following NEON functions:
- * -# @ref NEGEMMConvolutionLayer (executed only in case GEMM is required for the operation)
- * -# @ref NEWinogradLayer (executed only in case Winograd is required for the operation)
- * -# @ref NEDirectConvolutionLayer (executed only in case Direct Convolution is required for the operation)
+ * -# @ref NEGEMMConvolutionLayer (executed only in case GEMM is required for the operation)
+ * -# @ref NEWinogradConvolutionLayer (executed only in case Winograd is required for the operation)
+ * -# @ref NEDirectConvolutionLayer (executed only in case Direct Convolution is required for the operation)
*/
class NEConvolutionLayer : public IFunction
{
diff --git a/arm_compute/runtime/NEON/functions/NEWinogradLayer.h b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
index 8010810253..037c74c1a8 100644
--- a/arm_compute/runtime/NEON/functions/NEWinogradLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_NEWINOGRADLAYER_H__
-#define __ARM_COMPUTE_NEWINOGRADLAYER_H__
+#ifndef __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__
#include "arm_compute/runtime/IFunction.h"
@@ -46,11 +46,11 @@ class ITensor;
* -# @ref NEWinogradLayerBatchedGEMMKernel
* -# @ref CPPPermute (three times: weights, input and output)
*/
-class NEWinogradLayer : public IFunction
+class NEWinogradConvolutionLayer : public IFunction
{
public:
/** Constructor */
- NEWinogradLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
@@ -89,9 +89,9 @@ public:
const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEWinogradLayer(const NEWinogradLayer &) = delete;
+ NEWinogradConvolutionLayer(const NEWinogradConvolutionLayer &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- NEWinogradLayer &operator=(const NEWinogradLayer &) = delete;
+ NEWinogradConvolutionLayer &operator=(const NEWinogradConvolutionLayer &) = delete;
private:
MemoryGroup _memory_group;
@@ -119,4 +119,4 @@ private:
bool _is_activationlayer_enabled;
};
}
-#endif /* __ARM_COMPUTE_NEWINOGRADLAYER_H__ */
+#endif /* __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__ */
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index 39a7ee1a79..1d309cb80f 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -208,6 +208,7 @@ v18.03 Public maintenance release
- Updated recommended NDK version to r16b (And fixed warnings).
- Fixed bug in validation code.
- Added Inception v4 graph example.
+ - Renamed NEWinogradLayer.cpp to @ref NEWinogradConvolutionLayer
v18.02 Public major release
- Various NEON / OpenCL / GLES optimisations.
@@ -239,9 +240,9 @@ v18.02 Public major release
- Added name() method to all kernels.
- Added support for Winograd 5x5.
- @ref NEPermuteKernel / @ref NEPermute
- - @ref NEWinogradLayerTransformInputKernel / @ref NEWinogradLayer
- - @ref NEWinogradLayerTransformOutputKernel / @ref NEWinogradLayer
- - @ref NEWinogradLayerTransformWeightsKernel / @ref NEWinogradLayer
+ - @ref NEWinogradLayerTransformInputKernel / NEWinogradLayer
+ - @ref NEWinogradLayerTransformOutputKernel / NEWinogradLayer
+ - @ref NEWinogradLayerTransformWeightsKernel / NEWinogradLayer
- Renamed NEWinogradLayerKernel into @ref NEWinogradLayerBatchedGEMMKernel
- New GLES kernels / functions:
- @ref GCTensorShiftKernel / @ref GCTensorShift
@@ -313,7 +314,7 @@ v17.12 Public major release
- @ref NEGEMMLowpOffsetContributionKernel / @ref NEGEMMLowpMatrixAReductionKernel / @ref NEGEMMLowpMatrixBReductionKernel / @ref NEGEMMLowpMatrixMultiplyCore
- @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel / @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint
- @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel / @ref NEGEMMLowpQuantizeDownInt32ToUint8Scale
- - @ref NEWinogradLayer / NEWinogradLayerKernel
+ - NEWinogradLayer / NEWinogradLayerKernel
- New OpenCL kernels / functions
- @ref CLGEMMLowpOffsetContributionKernel / @ref CLGEMMLowpMatrixAReductionKernel / @ref CLGEMMLowpMatrixBReductionKernel / @ref CLGEMMLowpMatrixMultiplyCore
diff --git a/src/core/NEON/kernels/NEWinogradLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 3cfe2af470..fa76194529 100644
--- a/src/core/NEON/kernels/NEWinogradLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 906378c565..7a37dfa39d 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -169,8 +169,8 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
}
else if(conv_algorithm == ConvolutionMethod::WINOGRAD)
{
- std::tie(func, func_name) = create_named_memory_managed_function<NEWinogradLayer>(std::string("NEWinogradLayer"), mm,
- input, weights, biases, output, conv_info);
+ std::tie(func, func_name) = create_named_memory_managed_function<NEWinogradConvolutionLayer>(std::string("NEWinogradConvolutionLayer"), mm,
+ input, weights, biases, output, conv_info);
}
else
{
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 074f03580f..e438e79c76 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -51,7 +51,7 @@ Status NENodeValidator::validate(INode *node)
return detail::validate_convolution_layer<NEConvolutionLayer,
NEDirectConvolutionLayer,
NEGEMMConvolutionLayer,
- NEWinogradLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
+ NEWinogradConvolutionLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
return detail::validate_depthwise_convolution_layer<NEDepthwiseConvolutionLayer,
NEDepthwiseConvolutionLayer3x3>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 61ea2db15b..0ad4babedc 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -51,7 +51,7 @@ void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const
{
case ConvolutionMethod::WINOGRAD:
{
- auto f = arm_compute::support::cpp14::make_unique<NEWinogradLayer>(_memory_manager);
+ auto f = arm_compute::support::cpp14::make_unique<NEWinogradConvolutionLayer>(_memory_manager);
f->configure(input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
@@ -83,7 +83,7 @@ Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
{
case ConvolutionMethod::WINOGRAD:
//Validate Winograd
- NEWinogradLayer::validate(input, weights, biases, output, conv_info, act_info);
+ NEWinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, act_info);
break;
case ConvolutionMethod::GEMM:
//Validate Gemm-based Convolution
diff --git a/src/runtime/NEON/functions/NEWinogradLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index 7d93bcff07..a1256ac8cb 100644
--- a/src/runtime/NEON/functions/NEWinogradLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Utils.h"
@@ -32,7 +32,7 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "support/ToolchainSupport.h"
-#include "arm_compute/core/NEON/kernels/NEWinogradLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
@@ -79,14 +79,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
}
} //namespace
-NEWinogradLayer::NEWinogradLayer(std::shared_ptr<IMemoryManager> memory_manager)
+NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _arm_gemm(nullptr), _gemm_kernel(nullptr), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr),
_activationlayer_function(), _permute_input(), _permute_weights(), _permute_output(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(), _weights_hwio(),
_workspace(), _input(), _weights(), _output(), _reshaped_kernel(false), _is_activationlayer_enabled(false)
{
} /* arm_compute */
-void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_UNUSED(conv_info);
@@ -255,7 +255,7 @@ void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, co
}
}
-void NEWinogradLayer::run()
+void NEWinogradConvolutionLayer::run()
{
_memory_group.acquire();
if(!_reshaped_kernel)
@@ -286,8 +286,8 @@ void NEWinogradLayer::run()
_memory_group.release();
}
-Status NEWinogradLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const ActivationLayerInfo &act_info)
+Status NEWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
diff --git a/tests/benchmark/CL/ConvolutionLayer.cpp b/tests/benchmark/CL/ConvolutionLayer.cpp
index 3cf04295e4..e1cf99b573 100644
--- a/tests/benchmark/CL/ConvolutionLayer.cpp
+++ b/tests/benchmark/CL/ConvolutionLayer.cpp
@@ -29,7 +29,7 @@
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/benchmark/fixtures/ConvolutionLayerFixture.h"
-#include "tests/benchmark/fixtures/WinogradLayerFixture.h"
+#include "tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h"
#include "tests/datasets/system_tests/alexnet/AlexNetConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv1/GoogLeNetInceptionV1ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv4/GoogLeNetInceptionV4ConvolutionLayerDataset.h"
@@ -57,7 +57,7 @@ using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture<CLTensor, CLGEMMCo
TEST_SUITE(CL)
-using CLWinogradLayerFixture = WinogradLayerFixture<CLTensor, CLWinogradConvolutionLayer, CLAccessor>;
+using CLWinogradLayerFixture = WinogradConvolutionLayerFixture<CLTensor, CLWinogradConvolutionLayer, CLAccessor>;
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, CLWinogradLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
diff --git a/tests/benchmark/NEON/ConvolutionLayer.cpp b/tests/benchmark/NEON/ConvolutionLayer.cpp
index e7f2788020..ac27e7ad31 100644
--- a/tests/benchmark/NEON/ConvolutionLayer.cpp
+++ b/tests/benchmark/NEON/ConvolutionLayer.cpp
@@ -24,12 +24,12 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/benchmark/fixtures/ConvolutionLayerFixture.h"
-#include "tests/benchmark/fixtures/WinogradLayerFixture.h"
+#include "tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h"
#include "tests/datasets/system_tests/alexnet/AlexNetConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv1/GoogLeNetInceptionV1ConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv4/GoogLeNetInceptionV4ConvolutionLayerDataset.h"
@@ -62,27 +62,27 @@ using NEGEMMConvolutionLayerFixture = ConvolutionLayerFixture<Tensor, NEGEMMConv
TEST_SUITE(NEON)
#if defined(__aarch64__)
-using NEWinogradLayerFixture = WinogradLayerFixture<Tensor, NEWinogradLayer, Accessor>;
+using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFixture<Tensor, NEWinogradConvolutionLayer, Accessor>;
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1WinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1WinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4WinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4WinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", 1)));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::ALL,
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetWinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
framework::dataset::make("DataType", DataType::F32)),
@@ -170,25 +170,25 @@ REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2ConvolutionLayer, NEGEMMConvolutionLayerFi
framework::dataset::make("Batches", { 1, 4, 8 })));
#if defined(__aarch64__)
-REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
+REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV1WinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV1WinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4WinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
+REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4WinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4WinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Batches", { 4, 8 })));
-REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradLayerFixture, framework::DatasetMode::NIGHTLY,
+REGISTER_FIXTURE_DATA_TEST_CASE(SqueezeNetWinogradLayer, NEWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::SqueezeNetWinogradLayerDataset(),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
framework::dataset::make("DataType", DataType::F32)),
diff --git a/tests/benchmark/fixtures/WinogradLayerFixture.h b/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
index 0be535f4cc..8ed75af664 100644
--- a/tests/benchmark/fixtures/WinogradLayerFixture.h
+++ b/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_WINOGRADLAYERFIXTURE
-#define ARM_COMPUTE_TEST_WINOGRADLAYERFIXTURE
+#ifndef ARM_COMPUTE_TEST_WINOGRAD_CONVOLUTION_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_WINOGRAD_CONVOLUTION_LAYER_FIXTURE
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -38,7 +38,7 @@ namespace benchmark
{
/** Fixture that can be used for NEON and CL */
template <typename TensorType, typename Function, typename Accessor>
-class WinogradLayerFixture : public framework::Fixture
+class WinogradConvolutionLayerFixture : public framework::Fixture
{
public:
template <typename...>
@@ -98,4 +98,4 @@ private:
} // namespace benchmark
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRADLAYERFIXTURE */
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_CONVOLUTION_LAYER_FIXTURE */
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 6e673a5f96..30d8d751af 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -41,7 +41,7 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/WinogradLayerFixture.h"
+#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"
namespace arm_compute
{
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 3a365253cb..8b2e21e796 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -24,7 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
@@ -37,7 +37,7 @@
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
-#include "tests/validation/fixtures/WinogradLayerFixture.h"
+#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"
namespace arm_compute
{
@@ -109,10 +109,10 @@ TEST_SUITE_END()
TEST_SUITE(WinogradLayer)
template <typename T>
-using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradLayer, T>;
+using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
template <typename T>
-using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradLayer, T, false>;
+using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, false>;
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 173444ccd8..249f9d5649 100644
--- a/tests/validation/fixtures/WinogradLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*