-rw-r--r--  arm_compute/graph/LayerDescriptors.h                         |  2
-rw-r--r--  arm_compute/graph/Utils.h                                    |  4
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h                 |  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLConcatenateLayer.h        |  6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConcatenateLayer.h      |  4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h |  2
-rw-r--r--  src/graph/GraphBuilder.cpp                                   | 34
-rw-r--r--  src/graph/Utils.cpp                                          | 12
-rw-r--r--  src/graph/mutators/DepthConcatSubTensorMutator.cpp           |  4
-rw-r--r--  src/graph/mutators/GroupedConvolutionMutator.cpp             |  6
-rw-r--r--  src/graph/nodes/ConcatenateLayerNode.cpp                     |  2
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp                     |  9
-rw-r--r--  src/graph/nodes/DeconvolutionLayerNode.cpp                   |  9
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp            |  7
-rw-r--r--  src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp   |  7
-rw-r--r--  src/graph/nodes/PoolingLayerNode.cpp                         |  7
-rw-r--r--  src/graph/nodes/ReorgLayerNode.cpp                           |  9
-rw-r--r--  src/graph/nodes/ResizeLayerNode.cpp                          |  7
-rw-r--r--  src/graph/nodes/UpsampleLayerNode.cpp                        |  7
-rw-r--r--  src/runtime/CL/functions/CLConcatenateLayer.cpp              | 19
-rw-r--r--  src/runtime/CL/functions/CLPadLayer.cpp                      |  2
-rw-r--r--  src/runtime/NEON/functions/NEConcatenateLayer.cpp            | 15
-rw-r--r--  src/runtime/NEON/functions/NEPadLayer.cpp                    |  2
-rw-r--r--  tests/validation/CL/DepthConcatenateLayer.cpp                |  4
-rw-r--r--  tests/validation/CL/WidthConcatenateLayer.cpp                |  4
-rw-r--r--  tests/validation/NEON/DepthConcatenateLayer.cpp              | 16
-rw-r--r--  tests/validation/NEON/HeightConcatenateLayer.cpp             |  2
-rw-r--r--  tests/validation/NEON/WidthConcatenateLayer.cpp              |  3
-rw-r--r--  tests/validation/fixtures/ConcatenateLayerFixture.h          | 16
29 files changed, 111 insertions(+), 114 deletions(-)
diff --git a/arm_compute/graph/LayerDescriptors.h b/arm_compute/graph/LayerDescriptors.h
index 79099326ec..f52beab523 100644
--- a/arm_compute/graph/LayerDescriptors.h
+++ b/arm_compute/graph/LayerDescriptors.h
@@ -32,7 +32,7 @@ namespace graph
{
namespace descriptors
{
-/** Common node parameters */
+/** Concatenate layer descriptor */
struct ConcatLayerDescriptor
{
/** Default constructor */
diff --git a/arm_compute/graph/Utils.h b/arm_compute/graph/Utils.h
index 4ffccec9be..2fa2f3b627 100644
--- a/arm_compute/graph/Utils.h
+++ b/arm_compute/graph/Utils.h
@@ -110,12 +110,12 @@ void release_default_graph_context(GraphContext &ctx);
size_t get_dimension_size(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension);
/** Get index of a tensor's given dimension depending on its layout
*
- * @param[in] descriptor Descriptor
+ * @param[in] data_layout Data layout of the tensor
* @param[in] data_layout_dimension Tensor data layout dimension
*
* @return Idx of given dimension
*/
-size_t get_dimension_idx(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension);
+size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension);
/** Get the list of driving nodes of a given node
*
* @param[in] node Node to find the driving node of
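With this change, callers resolve a dimension index from a DataLayout directly instead of handing over a whole TensorDescriptor. A minimal sketch of the new call pattern, assuming the library headers above (the helper name width_idx is illustrative, not part of the API):

    #include "arm_compute/graph/Utils.h"

    using namespace arm_compute;
    using namespace arm_compute::graph;

    // Resolve the WIDTH index for a descriptor's layout. Under the old
    // signature this call had to pass the whole descriptor instead.
    size_t width_idx(const TensorDescriptor &desc)
    {
        return get_dimension_idx(desc.layout, DataLayoutDimension::WIDTH);
    }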
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index e05f4bc8cf..f6e6286a19 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -28,6 +28,7 @@
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
+#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"
@@ -321,7 +322,8 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
}
typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
- const DataLayoutDimension concat_axis = node.concatenation_axis();
+ const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
+ const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
// Create and configure function
auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index 5cf09c8ee0..d85a4453d8 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -59,7 +59,7 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] axis Concatenation axis. Supported underlying concatenation axes are 0, 1 and 2.
*/
- void configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, DataLayoutDimension axis);
+ void configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis);
/** Static function to check if given info will lead to a valid configuration of @ref CLConcatenateLayer
*
* @note Input and output tensor dimension preconditions differ depending on the concatenation axis.
@@ -71,7 +71,7 @@ public:
*
* @return a status
*/
- static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis);
+ static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
void run() override;
@@ -81,5 +81,5 @@ private:
unsigned int _num_inputs;
unsigned int _axis;
};
-}
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLCONCATENATELAYER_H__ */
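Concatenation functions now take the raw dimension index, as the test updates further down show. A hedged sketch of a CL call site under the new signature (tensor setup elided; concat_depth is an illustrative name, and axis 2 is the depth/Z dimension):

    #include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

    using namespace arm_compute;

    // Concatenate two tensors along axis 2. Previously this took
    // DataLayoutDimension::CHANNEL and resolved the index internally
    // from the output tensor's data layout.
    void concat_depth(ICLTensor *a, ICLTensor *b, ICLTensor *out)
    {
        CLConcatenateLayer concat;
        concat.configure({ a, b }, out, 2);
    }

The NEON header below changes the same way, so NEConcatenateLayer call sites follow the identical pattern.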
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
index 7dfbcf9199..f8cda326d2 100644
--- a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -59,7 +59,7 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] axis Concatenation axis. Supported underlying concatenation axes are 0, 1 and 2.
*/
- void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis);
+ void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, size_t axis);
/** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer
*
* @note Input and output tensor dimension preconditions differ depending on the concatenation axis.
@@ -71,7 +71,7 @@ public:
*
* @return a status
*/
- static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis);
+ static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index da38151e73..e2f2c4c44c 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -89,5 +89,5 @@ private:
std::unique_ptr<NEFillBorderKernel[]> _border_handlers_vector;
unsigned int _num_inputs;
};
-}
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEDEPTHCONCATENATE_H__ */
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index b96a242acf..9f8dd69922 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -221,14 +221,15 @@ NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPa
// Get input tensor descriptor
const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+ const DataLayout input_data_layout = input_tensor_desc.layout;
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL),
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::CHANNEL),
get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL) / num_groups);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::BATCHES), depth);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::BATCHES), depth);
if(!weights_quant_info.empty())
{
w_desc.quant_info = weights_quant_info;
@@ -275,14 +276,15 @@ NodeID GraphBuilder::add_deconvolution_node(Graph &g, NodeParams params, NodeIdx
// Get input tensor descriptor
const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+ const DataLayout input_data_layout = input_tensor_desc.layout;
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL),
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::CHANNEL),
get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL));
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::BATCHES), depth);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::BATCHES), depth);
NodeID w_nid = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
@@ -328,12 +330,13 @@ NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params,
// Get input tensor descriptor
const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+ const DataLayout input_data_layout = input_tensor_desc.layout;
// Create weights node
TensorDescriptor w_desc = input_tensor_desc;
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
- w_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL),
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::WIDTH), kernel_spatial_extend.width);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::HEIGHT), kernel_spatial_extend.height);
+ w_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::CHANNEL),
get_dimension_size(input_tensor_desc, DataLayoutDimension::CHANNEL) * depth_multiplier);
if(!quant_info.empty())
{
@@ -595,13 +598,14 @@ NodeID GraphBuilder::add_scale_layer(Graph &g, const NodeParams &params, NodeIdx
// Get input tensor descriptor
const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+ const DataLayout input_data_layout = input_tensor_desc.layout;
// Create mul node
TensorDescriptor mul_desc = input_tensor_desc;
- const size_t C = input_tensor_desc.shape[get_dimension_idx(mul_desc, DataLayoutDimension::CHANNEL)];
- mul_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::WIDTH), 1);
- mul_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::HEIGHT), 1);
- mul_desc.shape.set(get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL), C);
+ const size_t C = input_tensor_desc.shape[get_dimension_idx(input_data_layout, DataLayoutDimension::CHANNEL)];
+ mul_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::WIDTH), 1);
+ mul_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::HEIGHT), 1);
+ mul_desc.shape.set(get_dimension_idx(input_data_layout, DataLayoutDimension::CHANNEL), C);
NodeID mul_const_nid = add_const_node_with_name(g, params, "Mul", mul_desc, std::move(mul_accessor));
NodeIdxPair mul_const_nidxp = { mul_const_nid, 0 };
diff --git a/src/graph/Utils.cpp b/src/graph/Utils.cpp
index 71a6fc582b..4c34dd85a5 100644
--- a/src/graph/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -119,12 +119,12 @@ void setup_requested_backend_context(GraphContext &ctx, Target target)
size_t get_dimension_size(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension)
{
ARM_COMPUTE_ERROR_ON_MSG(descriptor.layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
- return descriptor.shape[get_dimension_idx(descriptor, data_layout_dimension)];
+ return descriptor.shape[get_dimension_idx(descriptor.layout, data_layout_dimension)];
}
-size_t get_dimension_idx(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension)
+size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
{
- ARM_COMPUTE_ERROR_ON_MSG(descriptor.layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
+ ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
/* Return the index based on the data layout
* [N C H W]
@@ -134,13 +134,13 @@ size_t get_dimension_idx(const TensorDescriptor &descriptor, const DataLayoutDim
switch(data_layout_dimension)
{
case DataLayoutDimension::CHANNEL:
- return (descriptor.layout == DataLayout::NCHW) ? 2 : 0;
+ return (data_layout == DataLayout::NCHW) ? 2 : 0;
break;
case DataLayoutDimension::HEIGHT:
- return (descriptor.layout == DataLayout::NCHW) ? 1 : 2;
+ return (data_layout == DataLayout::NCHW) ? 1 : 2;
break;
case DataLayoutDimension::WIDTH:
- return (descriptor.layout == DataLayout::NCHW) ? 0 : 1;
+ return (data_layout == DataLayout::NCHW) ? 0 : 1;
break;
case DataLayoutDimension::BATCHES:
return 3;
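Spelled out, the switch above implements a fixed mapping from (layout, logical dimension) to shape index. A small self-check derived only from the code above (the assert-based usage is illustrative):

    #include <cassert>
    #include "arm_compute/graph/Utils.h"

    using namespace arm_compute;
    using namespace arm_compute::graph;

    int main()
    {
        // Index by layout, per the switch above:
        //              NCHW   NHWC
        //   WIDTH       0      1
        //   HEIGHT      1      2
        //   CHANNEL     2      0
        //   BATCHES     3      3
        assert(get_dimension_idx(DataLayout::NCHW, DataLayoutDimension::CHANNEL) == 2);
        assert(get_dimension_idx(DataLayout::NHWC, DataLayoutDimension::CHANNEL) == 0);
        assert(get_dimension_idx(DataLayout::NHWC, DataLayoutDimension::BATCHES) == 3);
        return 0;
    }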
diff --git a/src/graph/mutators/DepthConcatSubTensorMutator.cpp b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
index 0e0a26b886..7994541b78 100644
--- a/src/graph/mutators/DepthConcatSubTensorMutator.cpp
+++ b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
@@ -62,9 +62,9 @@ void DepthConcatSubTensorMutator::mutate(Graph &g)
// Get output tensor
auto output_tensor = node->output(0);
- // Check concatenation axis (Sub-tensor optimization is support for concatenation axis >=2)
+ // Check concatenation axis (Sub-tensor optimization is supported for concatenation axis >=2)
auto *concat_node = arm_compute::utils::cast::polymorphic_downcast<ConcatenateLayerNode *>(node);
- if(output_tensor == nullptr || get_dimension_idx(output_tensor->desc(), concat_node->concatenation_axis()) < 2)
+ if(output_tensor == nullptr || get_dimension_idx(output_tensor->desc().layout, concat_node->concatenation_axis()) < 2)
{
continue;
}
diff --git a/src/graph/mutators/GroupedConvolutionMutator.cpp b/src/graph/mutators/GroupedConvolutionMutator.cpp
index d69d2cd7d0..3d53f49218 100644
--- a/src/graph/mutators/GroupedConvolutionMutator.cpp
+++ b/src/graph/mutators/GroupedConvolutionMutator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,12 +47,12 @@ NodeID create_grouped_convolution(Graph &g, const NodeParams &params, NodeIdxPai
// Split input
const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
- const unsigned int input_idx = get_dimension_idx(input_tensor_desc, DataLayoutDimension::CHANNEL);
+ const unsigned int input_idx = get_dimension_idx(input_tensor_desc.layout, DataLayoutDimension::CHANNEL);
NodeID input_split = GraphBuilder::add_split_node(g, params, input, num_groups, input_idx);
// Split weights
const TensorDescriptor weights_tensor_desc = get_tensor_descriptor(g, g.node(weights)->outputs()[0]);
- const unsigned int batch_idx = get_dimension_idx(weights_tensor_desc, DataLayoutDimension::BATCHES);
+ const unsigned int batch_idx = get_dimension_idx(weights_tensor_desc.layout, DataLayoutDimension::BATCHES);
NodeID weights_split = GraphBuilder::add_split_node(g, params, { weights, 0 }, num_groups, batch_idx);
// Split bias
diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp
index bbdc4dc029..48da8b6e9e 100644
--- a/src/graph/nodes/ConcatenateLayerNode.cpp
+++ b/src/graph/nodes/ConcatenateLayerNode.cpp
@@ -67,7 +67,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect
ARM_COMPUTE_ERROR_ON(input_descriptors.size() == 0);
TensorDescriptor output_descriptor = input_descriptors[0];
- const int axis_idx = get_dimension_idx(output_descriptor, axis);
+ const int axis_idx = get_dimension_idx(output_descriptor.layout, axis);
// Extract shapes
std::vector<const TensorShape *> shapes;
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index 15c7ff68f8..1c8dcaecfc 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -97,10 +97,11 @@ TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDes
std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
return output_descriptor;
}
diff --git a/src/graph/nodes/DeconvolutionLayerNode.cpp b/src/graph/nodes/DeconvolutionLayerNode.cpp
index e7ccffd04f..b1a6db7ccc 100644
--- a/src/graph/nodes/DeconvolutionLayerNode.cpp
+++ b/src/graph/nodes/DeconvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,10 +66,11 @@ TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorD
info.pad().first, info.pad().second,
info.stride().first, info.stride().second);
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
return output_descriptor;
}
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 935902d3fd..cdd9e7b601 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -86,10 +86,11 @@ TensorDescriptor DepthwiseConvolutionLayerNode::compute_output_descriptor(const
std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), input_channels * depth_multiplier);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), input_channels * depth_multiplier);
return output_descriptor;
}
diff --git a/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp b/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
index 27a348fa69..c304a6c605 100644
--- a/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
+++ b/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
@@ -102,10 +102,11 @@ TensorDescriptor FusedConvolutionBatchNormalizationNode::compute_output_descript
std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, kernel_width, kernel_height, info);
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), output_width);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), output_height);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), output_width);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), output_height);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), weights_descriptor.shape[3]);
return output_descriptor;
}
diff --git a/src/graph/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
index 26c145ae31..48b93c9158 100644
--- a/src/graph/nodes/PoolingLayerNode.cpp
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,9 +57,10 @@ TensorDescriptor PoolingLayerNode::compute_output_descriptor(const TensorDescrip
std::tie(pooled_width, pooled_height) = scaled_dimensions(input_width, input_height, pool_size_x, pool_size_y, info.pad_stride_info());
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), pooled_width);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), pooled_height);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), pooled_width);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), pooled_height);
return output_descriptor;
}
diff --git a/src/graph/nodes/ReorgLayerNode.cpp b/src/graph/nodes/ReorgLayerNode.cpp
index 6b83f6b90c..21ad451c3e 100644
--- a/src/graph/nodes/ReorgLayerNode.cpp
+++ b/src/graph/nodes/ReorgLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,10 +53,11 @@ TensorDescriptor ReorgLayerNode::compute_output_descriptor(const TensorDescripto
ARM_COMPUTE_ERROR_ON_MSG((input_width % stride != 0), "The width of the input tensor must be a multiple of stride");
ARM_COMPUTE_ERROR_ON_MSG((input_height % stride != 0), "The height of the input tensor must be a multiple of stride");
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), input_width / stride);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), input_height / stride);
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::CHANNEL), input_channel * stride * stride);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), input_width / stride);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), input_height / stride);
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::CHANNEL), input_channel * stride * stride);
return output_descriptor;
}
diff --git a/src/graph/nodes/ResizeLayerNode.cpp b/src/graph/nodes/ResizeLayerNode.cpp
index a6aa7bfe5c..a399229013 100644
--- a/src/graph/nodes/ResizeLayerNode.cpp
+++ b/src/graph/nodes/ResizeLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -68,9 +68,10 @@ TensorDescriptor ResizeLayerNode::configure_output(size_t idx) const
const Tensor *src = input(0);
ARM_COMPUTE_ERROR_ON(src == nullptr);
+ const DataLayout data_layout = src->desc().layout;
TensorDescriptor output_desc = src->desc();
- size_t width_idx = get_dimension_idx(output_desc, DataLayoutDimension::WIDTH);
- size_t height_idx = get_dimension_idx(output_desc, DataLayoutDimension::HEIGHT);
+ size_t width_idx = get_dimension_idx(data_layout, DataLayoutDimension::WIDTH);
+ size_t height_idx = get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT);
output_desc.shape.set(width_idx, static_cast<int>(output_desc.shape[width_idx] * _scale_width));
output_desc.shape.set(height_idx, static_cast<int>(output_desc.shape[height_idx] * _scale_height));
diff --git a/src/graph/nodes/UpsampleLayerNode.cpp b/src/graph/nodes/UpsampleLayerNode.cpp
index bdd39e8ebd..88af122a59 100644
--- a/src/graph/nodes/UpsampleLayerNode.cpp
+++ b/src/graph/nodes/UpsampleLayerNode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,9 +54,10 @@ TensorDescriptor UpsampleLayerNode::compute_output_descriptor(const TensorDescri
const unsigned int input_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
const unsigned int input_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const DataLayout data_layout = input_descriptor.layout;
TensorDescriptor output_descriptor = input_descriptor;
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::WIDTH), input_width * info.x());
- output_descriptor.shape.set(get_dimension_idx(output_descriptor, DataLayoutDimension::HEIGHT), input_height * info.y());
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::WIDTH), input_width * info.x());
+ output_descriptor.shape.set(get_dimension_idx(data_layout, DataLayoutDimension::HEIGHT), input_height * info.y());
return output_descriptor;
}
diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp
index 7edea3efac..b9b3c5bb80 100644
--- a/src/runtime/CL/functions/CLConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp
@@ -44,10 +44,10 @@ CLConcatenateLayer::CLConcatenateLayer()
{
}
-void CLConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, DataLayoutDimension axis)
+void CLConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
{
ARM_COMPUTE_ERROR_ON(output == nullptr);
- _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
+ _axis = axis;
_num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info(inputs_vector.size());
@@ -135,30 +135,29 @@ void CLConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_vector
}
}
-Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis)
+Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_RETURN_ERROR_ON(output == nullptr);
const unsigned int num_inputs = inputs_vector.size();
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON(num_inputs < 2);
- const unsigned int _axis = get_data_layout_dimension_index(inputs_vector[0]->data_layout(), axis);
// Output auto initialization if not yet initialized
TensorInfo tmp_output_info = *output->clone();
TensorShape output_shape{};
- if(_axis == Window::DimZ)
+ if(axis == Window::DimZ)
{
output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
}
else
{
- output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+ output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
}
auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
unsigned int offset = 0;
- switch(_axis)
+ switch(axis)
{
case Window::DimX:
{
@@ -180,7 +179,7 @@ Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
- offset += input->dimension(_axis);
+ offset += input->dimension(axis);
}
break;
}
@@ -191,7 +190,7 @@ Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
for(const auto &input : inputs_vector)
{
ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
- offset += input->dimension(_axis);
+ offset += input->dimension(axis);
}
break;
}
@@ -200,7 +199,7 @@ Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
for(const auto &input : inputs_vector)
{
ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
- offset += input->dimension(_axis);
+ offset += input->dimension(axis);
}
break;
}
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index f88cb388be..dba7f23f3b 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -141,7 +141,7 @@ void CLPadLayer::configure_reflect_symmetric_mode(ICLTensor *input, ICLTensor *o
}
// Concatenate the padding before and after with the input.
ICLTensor *out = (static_cast<int32_t>(i) == last_padding_dimension) ? output : &_concat_results[i];
- _concat_functions[i].configure(concat_vector, out, get_index_data_layout_dimension(prev->info()->data_layout(), i));
+ _concat_functions[i].configure(concat_vector, out, i);
prev = out;
}
}
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index fa7b91c3ca..e02c0c2c7a 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -44,10 +44,10 @@ NEConcatenateLayer::NEConcatenateLayer()
{
}
-void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis)
+void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, size_t axis)
{
ARM_COMPUTE_ERROR_ON(output == nullptr);
- _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
+ _axis = axis;
_num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info;
@@ -104,22 +104,21 @@ void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector,
}
}
-Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis)
+Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
- const unsigned int _axis = get_data_layout_dimension_index(inputs_vector[0]->data_layout(), axis);
// Output auto initialization if not yet initialized
TensorInfo tmp_output_info = *output->clone();
TensorShape output_shape{};
- if(_axis == Window::DimZ)
+ if(axis == Window::DimZ)
{
output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
}
else
{
- output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+ output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
}
auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
@@ -127,7 +126,7 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
for(const auto &input : inputs_vector)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- switch(_axis)
+ switch(axis)
{
case Window::DimX:
{
@@ -147,7 +146,7 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
default:
ARM_COMPUTE_ERROR("Axis not supported");
}
- offset += input->dimension(_axis);
+ offset += input->dimension(axis);
}
return Status{};
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 62a7d4559b..6af2ee8868 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -182,7 +182,7 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
}
// Concatenate the padding before and after with the input.
ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
- _concat_functions[i].configure(concat_vector, out, get_index_data_layout_dimension(input->info()->data_layout(), i));
+ _concat_functions[i].configure(concat_vector, out, i);
if(i != _num_dimensions - 1)
{
_concat_results[i].allocator()->allocate();
diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp
index f4a693ca7d..5da8a34351 100644
--- a/tests/validation/CL/DepthConcatenateLayer.cpp
+++ b/tests/validation/CL/DepthConcatenateLayer.cpp
@@ -73,7 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::CHANNEL));
+ bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 2));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
@@ -95,7 +95,7 @@ TEST_CASE(Configuration, framework::DatasetMode::ALL)
// Create and configure function
CLConcatenateLayer concat_layer;
- concat_layer.configure({ &src1, &src2, &src3 }, &dst, DataLayoutDimension::CHANNEL);
+ concat_layer.configure({ &src1, &src2, &src3 }, &dst, 2);
}
template <typename T>
using CLDepthConcatenateLayerFixture = ConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLConcatenateLayer, T>;
diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp
index 0ca6d72bff..2c1eb7fada 100644
--- a/tests/validation/CL/WidthConcatenateLayer.cpp
+++ b/tests/validation/CL/WidthConcatenateLayer.cpp
@@ -77,7 +77,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw,&output_info.clone()->set_is_resizable(true),DataLayoutDimension::WIDTH ));
+ bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw,&output_info.clone()->set_is_resizable(true), 0));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
@@ -99,7 +99,7 @@ TEST_CASE(Configuration, framework::DatasetMode::ALL)
// Create and configure function
CLConcatenateLayer concat_layer;
- concat_layer.configure({ &src1, &src2, &src3 }, &dst, DataLayoutDimension::WIDTH);
+ concat_layer.configure({ &src1, &src2, &src3 }, &dst, 0);
}
template <typename T>
diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp
index 1b355ae17d..0ddb220d34 100644
--- a/tests/validation/NEON/DepthConcatenateLayer.cpp
+++ b/tests/validation/NEON/DepthConcatenateLayer.cpp
@@ -55,13 +55,13 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(23U, 27U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(16U, 27U, 6U), 1, DataType::F32)
})),
- framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 9U), 1, DataType::F16),
- TensorInfo(TensorShape(25U, 12U, 9U), 1, DataType::F32),
- TensorInfo(TensorShape(23U, 27U, 8U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 27U, 12U), 1, DataType::F32)
- })),
- framework::dataset::make("Expected", { false, false, false, true })),
- input_info1, input_info2, output_info,expected)
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 9U), 1, DataType::F16),
+ TensorInfo(TensorShape(25U, 12U, 9U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 8U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 27U, 12U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Expected", { false, false, false, true })),
+ input_info1, input_info2, output_info,expected)
{
std::vector<TensorInfo> inputs_vector_info;
inputs_vector_info.emplace_back(std::move(input_info1));
@@ -73,7 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::CHANNEL));
+ bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 2));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/NEON/HeightConcatenateLayer.cpp b/tests/validation/NEON/HeightConcatenateLayer.cpp
index 0d08824645..9c23fb9bd3 100644
--- a/tests/validation/NEON/HeightConcatenateLayer.cpp
+++ b/tests/validation/NEON/HeightConcatenateLayer.cpp
@@ -77,7 +77,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), DataLayoutDimension::HEIGHT));
+ bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), 1));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/NEON/WidthConcatenateLayer.cpp b/tests/validation/NEON/WidthConcatenateLayer.cpp
index 20df3f4d7d..ed840ef325 100644
--- a/tests/validation/NEON/WidthConcatenateLayer.cpp
+++ b/tests/validation/NEON/WidthConcatenateLayer.cpp
@@ -75,8 +75,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw,
- &output_info.clone()->set_is_resizable(true),DataLayoutDimension::WIDTH));
+ bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(true), 0));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
diff --git a/tests/validation/fixtures/ConcatenateLayerFixture.h b/tests/validation/fixtures/ConcatenateLayerFixture.h
index db09957c09..39d4f9f95d 100644
--- a/tests/validation/fixtures/ConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/ConcatenateLayerFixture.h
@@ -112,21 +112,7 @@ protected:
// Create and configure function
FunctionType concat;
- switch(axis)
- {
- case 0:
- concat.configure(src_ptrs, &dst, DataLayoutDimension::WIDTH);
- break;
- case 1:
- concat.configure(src_ptrs, &dst, DataLayoutDimension::HEIGHT);
- break;
- case 2:
- concat.configure(src_ptrs, &dst, DataLayoutDimension::CHANNEL);
- break;
- default:
- ARM_COMPUTE_ERROR("Not supported");
- break;
- }
+ concat.configure(src_ptrs, &dst, axis);
for(auto &src : srcs)
{