author     Gunes Bayir <gunes.bayir@arm.com>    2021-09-13 13:38:29 +0100
committer  Gunes Bayir <gunes.bayir@arm.com>    2021-09-15 19:13:17 +0000
commit     cc171f9e4520e16b5e1b9c483562ed022d9151fa (patch)
tree       f364fb4dd1d8f775c21066ae53f61185257766cc
parent     9d6ddfc8a0ea579fb2a46fab971f6827d93fc96b (diff)
download   ComputeLibrary-cc171f9e4520e16b5e1b9c483562ed022d9151fa.tar.gz
Provide tests for fusing pad layer in graph API

There are two tests:

 - A unit test that checks whether certain padding configurations are to be fused or not
 - A fixture test that compares a reference implementation of pad+conv vs the target implementation, using the same fusing logic as the graph API

Tests are written for the CL backend only to prevent code duplication. The code written in the graph API remains untested.

Resolves: COMPMID-4702
Change-Id: Ie84d1cb910013033b46ac9d66cf5fc556d4963d2
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6252
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sheri Zhang <sheri.zhang@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
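For context, the fusing logic under test folds a preceding pad layer's width/height padding into the convolution's PadStrideInfo. Below is a minimal sketch of that fold, mirroring the fixture code further down in this patch; the free-function framing and its name are illustrative, not part of the patch:

#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Illustrative helper (not in the patch): fold the pad layer's width/height
// padding into the convolution's padding, keeping strides and rounding intact.
PadStrideInfo fold_pad_into_conv(const PadStrideInfo &conv_info,
                                 const PaddingInfo   &pad_w,
                                 const PaddingInfo   &pad_h)
{
    return PadStrideInfo(conv_info.stride().first,
                         conv_info.stride().second,
                         conv_info.pad_left()   + pad_w.first,
                         conv_info.pad_right()  + pad_w.second,
                         conv_info.pad_top()    + pad_h.first,
                         conv_info.pad_bottom() + pad_h.second,
                         conv_info.round());
}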
-rw-r--r--  src/graph/Utils.cpp                                    1
-rw-r--r--  src/graph/mutators/MutatorUtils.cpp                   52
-rw-r--r--  src/graph/mutators/MutatorUtils.h                     42
-rw-r--r--  src/graph/mutators/NodeFusionMutator.cpp              38
-rw-r--r--  tests/datasets/SmallConvolutionLayerDataset.h         11
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp              14
-rw-r--r--  tests/validation/CL/PadLayer.cpp                      61
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h   70
8 files changed, 253 insertions(+), 36 deletions(-)
diff --git a/src/graph/Utils.cpp b/src/graph/Utils.cpp
index 7db06b9c70..dcab177a3b 100644
--- a/src/graph/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -225,5 +225,6 @@ void configure_tensor(Tensor *tensor)
tensor->set_handle(std::move(handle));
}
}
+
} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/mutators/MutatorUtils.cpp b/src/graph/mutators/MutatorUtils.cpp
new file mode 100644
index 0000000000..c8f38f34e7
--- /dev/null
+++ b/src/graph/mutators/MutatorUtils.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/graph/mutators/MutatorUtils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+bool is_padding_in_height_or_width(const DataLayout &layout, const PaddingList &padding_list)
+{
+ if(layout == DataLayout::NCHW || layout == DataLayout::NHWC)
+ {
+ const unsigned int height_index = get_dimension_idx(layout, DataLayoutDimension::HEIGHT);
+ const unsigned int width_index = get_dimension_idx(layout, DataLayoutDimension::WIDTH);
+
+ for(unsigned int i = 0; i < padding_list.size(); ++i)
+ {
+ if(i != height_index && i != width_index && padding_list[i] != PaddingInfo(0, 0))
+ {
+ // if there is non-zero padding on any dimension other than height or width, don't fuse
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
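As a quick illustration of the new helper's semantics, here is a standalone sketch (assuming the private header is reachable from the build); the expectations match the CheckFusingWithConvolution truth table added further down:

#include "arm_compute/core/Types.h"
#include "src/graph/mutators/MutatorUtils.h"

#include <cassert>

using namespace arm_compute;

int main()
{
    // NCHW tensors are stored as [W, H, C, N]: only indices 0 and 1 may carry padding.
    assert(graph::is_padding_in_height_or_width(DataLayout::NCHW, PaddingList{ { 1, 1 }, { 1, 1 } }));
    assert(!graph::is_padding_in_height_or_width(DataLayout::NCHW, PaddingList{ { 0, 0 }, { 1, 1 }, { 1, 1 } })); // pads channels
    // NHWC tensors are stored as [C, W, H, N]: index 0 is the channel dimension.
    assert(graph::is_padding_in_height_or_width(DataLayout::NHWC, PaddingList{ { 0, 0 }, { 1, 1 }, { 1, 1 } }));
    assert(!graph::is_padding_in_height_or_width(DataLayout::NHWC, PaddingList{ { 0, 1 } })); // pads channels
    return 0;
}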
diff --git a/src/graph/mutators/MutatorUtils.h b/src/graph/mutators/MutatorUtils.h
new file mode 100644
index 0000000000..170d892c93
--- /dev/null
+++ b/src/graph/mutators/MutatorUtils.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_GRAPH_MUTATOR_UTILS_H
+#define ARM_COMPUTE_GRAPH_MUTATOR_UTILS_H
+
+#include "arm_compute/graph/Utils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Check if padding is in height and/or width dimensions
+ *
+ * @param[in] layout       Data layout of the tensor
+ * @param[in] padding_list List of padding pairs
+ *
+ * @return True if padding is applied only in the height and/or width dimensions, false otherwise
+ */
+bool is_padding_in_height_or_width(const DataLayout &layout, const PaddingList &padding_list);
+} // namespace graph
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_GRAPH_MUTATOR_UTILS_H */
\ No newline at end of file
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index b530fb0c00..e37164c60c 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -30,6 +30,8 @@
#include "arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h"
#include "arm_compute/graph/nodes/Nodes.h"
+#include "src/graph/mutators/MutatorUtils.h"
+
#include "support/Cast.h"
#include <set>
@@ -265,33 +267,6 @@ void fuse_node_with_activation(Graph &g, const Edge *output_edge, const std::set
}
}
-bool check_padding_info(const DataLayout &layout, const PaddingList &padding_list, PaddingInfo &pad_w, PaddingInfo &pad_h)
-{
- if(layout == DataLayout::NCHW || layout == DataLayout::NHWC)
- {
- const PaddingInfo zero_padding(0, 0);
-
- const unsigned int height_index = get_dimension_idx(layout, DataLayoutDimension::HEIGHT);
- const unsigned int width_index = get_dimension_idx(layout, DataLayoutDimension::WIDTH);
-
- pad_w = width_index < padding_list.size() ? padding_list[width_index] : zero_padding;
- pad_h = height_index < padding_list.size() ? padding_list[height_index] : zero_padding;
-
- for(unsigned int i = 0; i < padding_list.size(); i++)
- {
- if(i != height_index && i != width_index && padding_list[i] != zero_padding)
- {
- // if the index is not either height or width, don't fuse
- return false;
- }
- }
-
- return true;
- }
-
- return false;
-}
-
template <typename N>
void fuse_pad_with_convolution(Graph &g, const Edge *output_edge)
{
@@ -304,9 +279,14 @@ void fuse_pad_with_convolution(Graph &g, const Edge *output_edge)
{
const DataLayout layout = input_edge->tensor()->desc().layout;
const PaddingList padding_list = pad_node->padding();
- PaddingInfo pad_w, pad_h;
- if(check_padding_info(layout, padding_list, pad_w, pad_h))
+ const unsigned int height_index = get_dimension_idx(layout, DataLayoutDimension::HEIGHT);
+ const unsigned int width_index = get_dimension_idx(layout, DataLayoutDimension::WIDTH);
+
+ const PaddingInfo pad_w = width_index < padding_list.size() ? padding_list[width_index] : PaddingInfo(0, 0);
+ const PaddingInfo pad_h = height_index < padding_list.size() ? padding_list[height_index] : PaddingInfo(0, 0);
+
+ if(is_padding_in_height_or_width(layout, padding_list))
{
// Add paddings to the convolution node
const PadStrideInfo conv_info = conv_node->convolution_info();
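To make the fused-padding computation above concrete, a worked example under assumed values (NCHW layout, so padding-list index 0 is width and index 1 is height; none of these numbers are from the patch):

// Hypothetical values:
//   pad node:   padding_list = { { 2, 2 }, { 3, 3 } }  ->  pad_w = (2, 2), pad_h = (3, 3)
//   conv node:  conv_info    = PadStrideInfo(1, 1, /* pad_x */ 1, /* pad_y */ 1)
// After fusion the pad node is removed and the convolution runs with:
//   PadStrideInfo(1, 1, /* left */ 3, /* right */ 3, /* top */ 4, /* bottom */ 4, conv_info.round())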
diff --git a/tests/datasets/SmallConvolutionLayerDataset.h b/tests/datasets/SmallConvolutionLayerDataset.h
index 7d1db5a73e..67eade1e64 100644
--- a/tests/datasets/SmallConvolutionLayerDataset.h
+++ b/tests/datasets/SmallConvolutionLayerDataset.h
@@ -181,6 +181,17 @@ public:
}
};
+class SmallConvolutionLayerPrePaddingDataset final : public ConvolutionLayerDataset
+{
+public:
+ SmallConvolutionLayerPrePaddingDataset()
+ {
+ // output shape is calculated by taking the pre-padding layer into account as well -- all data is in NCHW order
+ add_config(TensorShape(17U, 31U, 2U), TensorShape(5U, 5U, 2U, 19U), TensorShape(19U), TensorShape(17U, 16U, 19U), PadStrideInfo(1, 2, 1, 1));
+ add_config(TensorShape(33U, 27U, 7U), TensorShape(5U, 5U, 7U, 16U), TensorShape(16U), TensorShape(12U, 13U, 16U), PadStrideInfo(3, 2, 2, 0));
+ }
+};
+
class SmallConvolutionLayerReducedDataset final : public ConvolutionLayerDataset
{
public:
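A quick sanity check of the first configuration's output shape, assuming PadStrideInfo(1, 2, 1, 1) reads as (stride_x, stride_y, pad_x, pad_y) and the pre-pad layer used by the CL test below is PaddingList({ { 1, 1 }, { 1, 1 } }):

// Pre-padding grows the 17x31 input by 1 on each side of W and H.
constexpr unsigned int padded_w = 17 + 1 + 1; // 19
constexpr unsigned int padded_h = 31 + 1 + 1; // 33
// Standard convolution output size: (in + 2 * pad - kernel) / stride + 1
constexpr unsigned int out_w = (padded_w + 2 * 1 - 5) / 1 + 1; // 17
constexpr unsigned int out_h = (padded_h + 2 * 1 - 5) / 2 + 1; // 16
static_assert(out_w == 17 && out_h == 16, "matches TensorShape(17U, 16U, 19U)");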
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 6824ce1413..ae2949c767 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -188,6 +188,8 @@ template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
template <typename T>
using CLGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>;
+template <typename T>
+using CLConvolutionValidationWithPaddingFixture = ConvolutionValidationWithPaddingFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -232,6 +234,18 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayout
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunSmallWithPadding, CLConvolutionValidationWithPaddingFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerPrePaddingDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
+framework::dataset::make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
diff --git a/tests/validation/CL/PadLayer.cpp b/tests/validation/CL/PadLayer.cpp
index 370195b078..ea0cb32785 100644
--- a/tests/validation/CL/PadLayer.cpp
+++ b/tests/validation/CL/PadLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include "arm_compute/graph/Utils.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
+#include "src/graph/mutators/MutatorUtils.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
#include "tests/datasets/ShapeDatasets.h"
@@ -110,6 +112,63 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
ARM_COMPUTE_EXPECT(bool(CLPadLayer::validate(&input_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), padding, PixelValue(), mode)) == expected, framework::LogLevel::ERRORS);
}
+DATA_TEST_CASE(CheckFusingWithConvolution, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("DataLayout", { DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NCHW,
+ DataLayout::NHWC,
+ DataLayout::NHWC,
+ DataLayout::NHWC,
+ DataLayout::NHWC,
+ DataLayout::NHWC,
+ DataLayout::NHWC,
+ DataLayout::NHWC,
+ DataLayout::UNKNOWN
+ }),
+ framework::dataset::make("PaddingList", { PaddingList({{0, 0}, {1, 1}, {1, 1}}), // nchw
+ PaddingList({{1, 1}, {1, 1}, {0, 0}, {0, 0}}),
+ PaddingList({{1, 1}, {1, 1}}),
+ PaddingList({}),
+ PaddingList({{0, 0}}),
+ PaddingList({{0, 0}, {0, 0}, {0, 0}, {0, 0}}),
+ PaddingList({{0, 0}, {0, 0}, {0, 0}, {1, 0}}),
+ PaddingList({{0, 1}}),
+ PaddingList({{0, 0}, {1, 1}, {1, 1}}), // nhwc
+ PaddingList({{0, 0}, {0, 0}, {1, 1}, {1, 1}}),
+ PaddingList({{0, 0}, {1, 0}, {1, 1}, {0, 0}}),
+ PaddingList({}),
+ PaddingList({{0, 0}}),
+ PaddingList({{0, 1}}),
+ PaddingList({{0, 0}, {1, 1}}),
+ PaddingList({{0, 0}}) // unknown
+ })),
+ framework::dataset::make("Expected", { false, // nchw
+ true,
+ true,
+ true,
+ true,
+ true,
+ false,
+ true,
+ true, // nhwc
+ false,
+ true,
+ true,
+ true,
+ false,
+ true,
+ false // unknown
+ })),
+ data_layout, padding_list, expected)
+{
+ ARM_COMPUTE_EXPECT(expected == arm_compute::graph::is_padding_in_height_or_width(data_layout, padding_list), framework::LogLevel::ERRORS);
+}
+
// clang-format on
// *INDENT-ON*
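The expected values in the truth table above follow from how ComputeLibrary orders tensor dimensions per layout; a small standalone check (assuming the graph Utils header is available):

#include "arm_compute/graph/Utils.h"

#include <cassert>

using namespace arm_compute;

int main()
{
    // NCHW shapes are stored [W, H, C, N] and NHWC shapes [C, W, H, N], so the
    // width/height indices inspected by the fusing check differ per layout.
    assert(graph::get_dimension_idx(DataLayout::NCHW, DataLayoutDimension::WIDTH) == 0u);
    assert(graph::get_dimension_idx(DataLayout::NCHW, DataLayoutDimension::HEIGHT) == 1u);
    assert(graph::get_dimension_idx(DataLayout::NHWC, DataLayoutDimension::WIDTH) == 1u);
    assert(graph::get_dimension_idx(DataLayout::NHWC, DataLayoutDimension::HEIGHT) == 2u);
    // Hence NHWC + PaddingList({{0, 1}}) pads index 0 (channels) and is not fusable.
    return 0;
}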
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 6dbf3d5731..0b3f070e9c 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -26,7 +26,9 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/graph/Utils.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/graph/mutators/MutatorUtils.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -35,6 +37,7 @@
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/PadLayer.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"
@@ -70,7 +73,7 @@ public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
- bool mixed_layout = false)
+ bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}))
{
_mixed_layout = mixed_layout;
_data_type = data_type;
@@ -83,8 +86,8 @@ public:
_weight_quantization_info = weight_quantization_info;
_data_layout = data_layout;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
}
protected:
@@ -179,8 +182,9 @@ protected:
}
}
+ // the given input is in NCHW format
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
+ bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
{
ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
@@ -191,6 +195,18 @@ protected:
permute(input_shape, PermutationVector(2U, 0U, 1U));
permute(weights_shape, PermutationVector(2U, 0U, 1U));
permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ if(pre_pad_layer.size() > 0)
+ {
+ // make sure paddings exist for each of the c,h,w dimensions
+ for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i)
+ {
+ pre_pad_layer.push_back({ 0, 0 });
+ }
+
+ // rotate padding info from nchw to nhwc
+ std::rotate(pre_pad_layer.begin(), pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3);
+ }
}
const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
@@ -207,7 +223,30 @@ protected:
// Create and configure function
FunctionType conv;
- detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
+
+ const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT);
+ const unsigned int width_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH);
+
+ const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0);
+ const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0);
+
+ if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer))
+ {
+ // this is the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution
+ const PadStrideInfo new_conv_info(
+ info.stride().first,
+ info.stride().second,
+ info.pad_left() + pad_w.first,
+ info.pad_right() + pad_w.second,
+ info.pad_top() + pad_h.first,
+ info.pad_bottom() + pad_h.second,
+ info.round());
+ detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups);
+ }
+ else
+ {
+ detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
+ }
ARM_COMPUTE_ASSERT(src.info()->is_resizable());
ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
@@ -246,7 +285,7 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- const Size2D &dilation, const ActivationLayerInfo act_info)
+ const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
{
ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
@@ -273,6 +312,11 @@ protected:
regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
}
+ if(pre_pad_layer.size() > 0)
+ {
+ src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT);
+ }
+
return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
act_info) :
reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
@@ -307,6 +351,20 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class ConvolutionValidationWithPaddingFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
+ DataLayout data_layout, ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
+ data_type, data_type, data_layout,
+ QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout, pre_pad_layer);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
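Finally, a standalone sketch of the fixture's NCHW-to-NHWC padding rotation (hypothetical values; mirrors the std::rotate call in compute_target above):

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

int main()
{
    // pre_pad_layer arrives in NCHW shape order [W, H, C]; for an NHWC run the
    // fixture rotates the last entry to the front, giving [C, W, H].
    std::vector<std::pair<unsigned int, unsigned int>> pre_pad{ { 1, 1 }, { 2, 2 }, { 0, 0 } }; // W, H, C
    std::rotate(pre_pad.begin(), pre_pad.begin() + 2, pre_pad.begin() + 3);
    assert(pre_pad[0] == std::make_pair(0u, 0u)); // C
    assert(pre_pad[1] == std::make_pair(1u, 1u)); // W
    assert(pre_pad[2] == std::make_pair(2u, 2u)); // H
    return 0;
}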