From 8aa985e6cd553f4e2cee6cab74b82fa626896299 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Tue, 27 Nov 2018 15:58:08 +0000
Subject: COMPMID-1725: Implement Pack

Change-Id: I13f6e4c600f39355f69e015409bf30dafdc5e3aa
Reviewed-on: https://review.mlplatform.org/332
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
---
 tests/validation/CL/StackLayer.cpp            | 405 ++++++++++++++++++++++++++
 tests/validation/fixtures/StackLayerFixture.h | 138 +++++++++
 tests/validation/reference/StackLayer.cpp     | 125 ++++++++
 tests/validation/reference/StackLayer.h       |  44 +++
 4 files changed, 712 insertions(+)
 create mode 100644 tests/validation/CL/StackLayer.cpp
 create mode 100644 tests/validation/fixtures/StackLayerFixture.h
 create mode 100644 tests/validation/reference/StackLayer.cpp
 create mode 100644 tests/validation/reference/StackLayer.h

diff --git a/tests/validation/CL/StackLayer.cpp b/tests/validation/CL/StackLayer.cpp
new file mode 100644
index 0000000000..089911272a
--- /dev/null
+++ b/tests/validation/CL/StackLayer.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLStackLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/StackLayerFixture.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+/** Data types */
+const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 });
+
+/** Num tensors values to test */
+const auto n_values = framework::dataset::make("NumTensors", { 3, 4 });
+
+/** Shapes 1D to test */
+const auto shapes_1d_small = combine(datasets::Small1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Shapes 2D to test */
+const auto shapes_2d_small = combine(datasets::Small2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Shapes 3D to test */
+const auto shapes_3d_small = combine(datasets::Small3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Shapes 4D to test */
+const auto shapes_4d_small = combine(datasets::Small4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Shapes 1D to test */
+const auto shapes_1d_large = combine(datasets::Large1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Shapes 2D to test */
+const auto shapes_2d_large = combine(datasets::Large2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Shapes 3D to test */
+const auto shapes_3d_large = combine(datasets::Large3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Shapes 4D to test */
+const auto shapes_4d_large = combine(datasets::Large4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Configuration test */
+void validate_configuration(TensorShape shape_in, int axis, DataType data_type, int num_tensors)
+{
+    // Wrap around negative values
+    const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_in.num_dimensions() + 1));
+
+    const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_in, 1, data_type), axis_u, num_tensors);
+
+    std::vector<CLTensor>    tensors(num_tensors);
+    std::vector<ICLTensor *> src(num_tensors);
+
+    // Create vector of input tensors
+    for(int i = 0; i < num_tensors; ++i)
+    {
+        tensors[i] = create_tensor<CLTensor>(shape_in, data_type);
+        src[i]     = &(tensors[i]);
+        ARM_COMPUTE_EXPECT(src[i]->info()->is_resizable(), framework::LogLevel::ERRORS);
+    }
+
+    // Create tensors
+    CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLStackLayer stack;
+    stack.configure(src, axis, &dst);
+}
+} // namespace
+
+/** Fixture to use */
+template <typename T>
+using CLStackLayerFixture = StackLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLStackLayer, T>;
+
+using namespace arm_compute::misc::shape_calculator;
+
+TEST_SUITE(CL)
+TEST_SUITE(StackLayer)
+TEST_SUITE(Shapes1D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_1d_small,
+                                                                           data_types),
+                                                                   n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_1d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_1d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_1d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_1d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_1d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_1d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes1D
+
+TEST_SUITE(Shapes2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_2d_small,
+                                                                           data_types),
+                                                                   n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_2d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_2d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_2d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_2d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_2d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_2d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes2D
+
+TEST_SUITE(Shapes3D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_3d_small,
+                                                                           data_types),
+                                                                   n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_3d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_3d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_3d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_3d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_3d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_3d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes3D
+
+TEST_SUITE(Shapes4D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_4d_small,
+                                                                           data_types),
+                                                                   n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_4d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_4d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_4d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_4d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_4d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_4d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                               n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes4D
+TEST_SUITE_END() // StackLayer
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/StackLayerFixture.h b/tests/validation/fixtures/StackLayerFixture.h
new file mode 100644
index 0000000000..cab4350787
--- /dev/null
+++ b/tests/validation/fixtures/StackLayerFixture.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/StackLayer.h"
+#include "tests/validation/reference/Utils.h"
+
+#include <random>
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AbstractTensorType, typename AccessorType, typename FunctionType, typename T>
+class StackLayerValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+    {
+        _target    = compute_target(shape_src, axis, data_type, num_tensors);
+        _reference = compute_reference(shape_src, axis, data_type, num_tensors);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, unsigned int i)
+    {
+        library->fill_tensor_uniform(tensor, i);
+    }
+
+    TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+    {
+        std::vector<TensorType>           tensors(num_tensors);
+        std::vector<AbstractTensorType *> src(num_tensors);
+
+        // Create vector of input tensors
+        for(int i = 0; i < num_tensors; ++i)
+        {
+            tensors[i] = create_tensor<TensorType>(shape_src, data_type);
+            src[i]     = &(tensors[i]);
+            ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        // Create tensors
+        CLTensor dst;
+
+        // The output tensor will be auto-initialized within the function
+
+        // Create and configure function
+        FunctionType stack;
+        stack.configure(src, axis, &dst);
+
+        // Allocate and fill the input tensors
+        for(int i = 0; i < num_tensors; ++i)
+        {
+            ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+            tensors[i].allocator()->allocate();
+            ARM_COMPUTE_EXPECT(!tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+
+            // Fill input tensor
+            fill(AccessorType(tensors[i]), i);
+        }
+
+        // Allocate output tensor
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Compute stack function
+        stack.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &shape_src, int axis, DataType data_type, int num_tensors)
+    {
+        std::vector<SimpleTensor<T>> src;
+
+        for(int i = 0; i < num_tensors; ++i)
+        {
+            src.emplace_back(std::move(SimpleTensor<T>(shape_src, data_type, 1)));
+
+            fill(src[i], i);
+        }
+
+        // Wrap around negative values
+        const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_src.num_dimensions() + 1));
+
+        const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_src, 1, data_type), axis_u, num_tensors);
+
+        return reference::stack_layer(src, shape_dst, data_type, axis_u);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE */
diff --git a/tests/validation/reference/StackLayer.cpp b/tests/validation/reference/StackLayer.cpp
new file mode 100644
index 0000000000..50e440c914
--- /dev/null
+++ b/tests/validation/reference/StackLayer.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "StackLayer.h"
+
+#include "arm_compute/core/Types.h"
+
+#include "tests/validation/Helpers.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> stack_layer(const std::vector<SimpleTensor<T>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis)
+{
+    ARM_COMPUTE_ERROR_ON(output_shape.num_dimensions() > 5);
+    ARM_COMPUTE_ERROR_ON(in.size() < 2);
+    ARM_COMPUTE_ERROR_ON(axis > in[0].shape().num_dimensions());
+
+    SimpleTensor<T> out{ output_shape, data_type };
+
+    const int width       = in[0].shape()[0];
+    const int height      = in[0].shape()[1];
+    const int depth       = in[0].shape()[2];
+    const int batch_size  = in[0].shape()[3];
+    const int num_tensors = in.size();
+
+    // Array to store the input coordinates
+    // i_coordinates[0] = xi, i_coordinates[1] = yi, i_coordinates[2] = zi
+    // i_coordinates[3] = bi, i_coordinates[4] = i, i_coordinates[5] = 0
+    // i_coordinates[5] will always be zero and is used to avoid incrementing the output coordinate when the input has fewer than 4 dimensions
+    int i_coordinates[6] = { 0 };
+
+    // Array of pointers used to map the output coordinates to the input ones according to the axis
+    // This array is initialized with &i_coordinates[5] since this will always be zero
+    int *o_coordinates[5] = { &i_coordinates[5], &i_coordinates[5], &i_coordinates[5], &i_coordinates[5], &i_coordinates[5] };
+
+    // Set the axis coordinate
+    o_coordinates[axis] = &i_coordinates[4];
+
+    unsigned int k_shift = 0;
+
+    // Map the output coordinates
+    for(unsigned int k = 0; k < in[0].shape().num_dimensions(); ++k)
+    {
+        if(k == axis)
+        {
+            k_shift++;
+        }
+
+        o_coordinates[k + k_shift] = &i_coordinates[k];
+    }
+
+    // Use alias for the input coordinates
+    int &xi = i_coordinates[0];
+    int &yi = i_coordinates[1];
+    int &zi = i_coordinates[2];
+    int &bi = i_coordinates[3];
+    int &i  = i_coordinates[4];
+
+    // Use alias for the output coordinates
+    int &xo = *(o_coordinates[0]);
+    int &yo = *(o_coordinates[1]);
+    int &zo = *(o_coordinates[2]);
+    int &bo = *(o_coordinates[3]);
+    int &wo = *(o_coordinates[4]);
+
+    // Stack tensors
+    for(; i < num_tensors; ++(i))
+    {
+        bi = 0;
+        for(; bi < batch_size; ++(bi))
+        {
+            zi = 0;
+            for(; zi < depth; ++(zi))
+            {
+                yi = 0;
+                for(; yi < height; ++(yi))
+                {
+                    xi = 0;
+                    for(; xi < width; ++(xi))
+                    {
+                        *(reinterpret_cast<T *>(out(Coordinates(xo, yo, zo, bo, wo)))) =
+                            *(reinterpret_cast<const T *>(in[i](Coordinates(xi, yi, zi, bi))));
+                    }
+                }
+            }
+        }
+    }
+
+    return out;
+}
+template SimpleTensor<int> stack_layer(const std::vector<SimpleTensor<int>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+template SimpleTensor<short> stack_layer(const std::vector<SimpleTensor<short>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+template SimpleTensor<char> stack_layer(const std::vector<SimpleTensor<char>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/StackLayer.h b/tests/validation/reference/StackLayer.h
new file mode 100644
index 0000000000..453f176a9d
--- /dev/null
+++ b/tests/validation/reference/StackLayer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_STACK_LAYER_H__
+#define __ARM_COMPUTE_TEST_STACK_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> stack_layer(const std::vector<SimpleTensor<T>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_STACK_LAYER_H__ */
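
Usage sketch (not part of the patch): the fixture above drives CLStackLayer exactly as an application would - configure() with a vector of identically shaped inputs and an axis, let the function auto-initialize the output tensor, allocate, then run. A minimal standalone example, assuming a default CL context; the shape, axis, data type and tensor count below are illustrative, not taken from the patch.

// Stand-alone sketch of CLStackLayer usage (assumptions: default CL backend, F32 data,
// a 2-D input shape of width=4, height=2, three inputs stacked along axis 0).
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLStackLayer.h"

#include <vector>

int main()
{
    using namespace arm_compute;

    // Create a default OpenCL context/queue for the CL runtime
    CLScheduler::get().default_init();

    const int         num_tensors = 3;
    const int         axis        = 0;
    const TensorShape shape_in(4U, 2U); // width = 4, height = 2

    std::vector<CLTensor>    tensors(num_tensors);
    std::vector<ICLTensor *> src(num_tensors);
    for(int i = 0; i < num_tensors; ++i)
    {
        tensors[i].allocator()->init(TensorInfo(shape_in, 1, DataType::F32));
        src[i] = &tensors[i];
    }

    // The output is auto-initialized by configure(): for axis 0 the shape becomes (3, 4, 2),
    // i.e. the input shape with a new dimension of size num_tensors inserted at 'axis'
    // (this is what compute_stack_shape() computes in the tests).
    CLTensor     dst;
    CLStackLayer stack;
    stack.configure(src, axis, &dst);

    for(auto &t : tensors)
    {
        t.allocator()->allocate();
        // ... fill t, e.g. via map()/unmap() ...
    }
    dst.allocator()->allocate();

    stack.run();
    CLScheduler::get().sync();

    return 0;
}

The tests rely on the same auto-initialization: the fixture never sizes the destination tensor itself, it only checks that configure() left it non-resizable after allocation.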
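
The pointer-aliasing scheme in the reference implementation amounts to a simple rule: the output coordinate vector is the input coordinate vector with the tensor index inserted at position 'axis'. A small self-contained sketch of that mapping (illustrative helper, not code from the patch):

// For axis == 1 on a (x, y, z, b) input: out(x, i, y, z, b) = in[i](x, y, z, b).
#include <array>
#include <cassert>

// Build the 5-D output coordinate from a 4-D input coordinate and the tensor index.
std::array<int, 5> stacked_coord(const std::array<int, 4> &in, int tensor_idx, unsigned int axis)
{
    std::array<int, 5> out{};
    unsigned int       o = 0;
    for(unsigned int k = 0; k < 5; ++k)
    {
        // Insert the tensor index at 'axis'; copy the remaining input coordinates in order
        out[k] = (k == axis) ? tensor_idx : in[o++];
    }
    return out;
}

int main()
{
    // Stacking tensor 2 along axis 1: (7, 5, 3, 1) -> (7, 2, 5, 3, 1)
    const auto c = stacked_coord({ 7, 5, 3, 1 }, 2, 1);
    assert(c[0] == 7 && c[1] == 2 && c[2] == 5 && c[3] == 3 && c[4] == 1);
    return 0;
}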