From 5a7d1571a2de24eefc6f1d8d22deeef9f47521ee Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Tue, 21 Mar 2023 12:00:15 +0000
Subject: Fix BatchToSpaceFixture

* Use a vector to represent the (static) block shape instead of an N-D
  Tensor. The previous use of an N-D Tensor as the block shape was
  wrong: it did not adhere to the specification and was non-functional
  (only the first dim was used anyway).
* The fixture now accepts a static block shape, because the dynamic
  case is not properly implemented and will be deprecated for now.
* Fix an assertion error in the reference implementation.

Partially resolves COMPMID-5918

Change-Id: I5221e52ccc05e7c1249dec3a42426f954a73729a
Signed-off-by: SiCong Li
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9357
Tested-by: Arm Jenkins
Reviewed-by: Pablo Marquez Tello
Reviewed-by: Omar Al Khatib
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
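Reviewer note (kept below the "---" separator, so not part of the commit
message): the following is an illustrative sketch of the new static
block-shape interface, checked against one of the new
SmallBatchToSpaceLayerDataset entries. It assumes only the public headers
touched by this patch; the main() harness is hypothetical and is not part
of the library or its test suite.

    // Sketch: recompute the expected output shape of the dataset entry
    // src = (3, 2, 1, 4), block = {2, 2}, no cropping, in NCHW (W, H, C, N).
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/utils/misc/ShapeCalculator.h"

    #include <cassert>

    int main()
    {
        using namespace arm_compute;

        const TensorShape src(3U, 2U, 1U, 4U);
        const TensorShape dst = misc::shape_calculator::compute_batch_to_space_shape(
            DataLayout::NCHW, src, /* block_x */ 2, /* block_y */ 2, CropInfo());

        // Width and height are multiplied by the block sizes, and the batch is
        // divided by their product: (3*2, 2*2, 1, 4/(2*2)) = (6, 4, 1, 1).
        assert(dst[0] == 6 && dst[1] == 4 && dst[2] == 1 && dst[3] == 1);
        return 0;
    }

The same arithmetic is what the reference implementation now asserts
against dst_shape via have_different_dimensions(), in place of the
previous (incorrect) crop assertions on the output dimensions.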
 arm_compute/core/utils/misc/ShapeCalculator.h        | 32 ++++----
 src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp    |  6 +-
 .../NEON/kernels/NEBatchToSpaceLayerKernel.cpp       |  4 +-
 tests/datasets/BatchToSpaceDataset.h                 | 85 +++++++++++++++-------
 .../validation/fixtures/BatchToSpaceLayerFixture.h   | 56 ++++----------
 tests/validation/reference/BatchToSpaceLayer.cpp     | 18 +++--
 tests/validation/reference/BatchToSpaceLayer.h       |  2 +-
 7 files changed, 107 insertions(+), 96 deletions(-)

diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index a895b58aba..916da1bd9d 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -1100,28 +1100,28 @@ inline TensorShape compute_slice_shape(const TensorShape &input_shape, const Coo
 
 /** Calculate the batch to space output shape of a tensor
  *
- * @param[in] input     Input tensor info
- * @param[in] block_x   Block shape x value
- * @param[in] block_y   Block shape y value
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
+ * @param[in] data_layout Data layout
+ * @param[in] input       Input tensor shape
+ * @param[in] block_x     Block shape x value
+ * @param[in] block_y     Block shape y value
+ * @param[in] crop_info   Information about how the output shape is cropped after batch to space is performed
  *
  * @return the calculated shape
  */
-inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const int block_x, const int block_y, const CropInfo &crop_info = CropInfo{})
+inline TensorShape compute_batch_to_space_shape(DataLayout data_layout, const TensorShape &input, int block_x, int block_y, const CropInfo &crop_info = CropInfo{})
 {
-    ARM_COMPUTE_ERROR_ON(block_x <= 0 || block_y <= 0);
+    ARM_COMPUTE_ERROR_ON(block_x < 1 || block_y < 1);
 
-    const DataLayout data_layout = input->data_layout();
-    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
-    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-    const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+    const int idx_width  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const int idx_batch  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
 
-    TensorShape output_shape{ input->tensor_shape() };
+    TensorShape output_shape{ input };
 
-    auto       new_width   = input->tensor_shape()[idx_width] * block_x;
-    auto       new_height  = input->tensor_shape()[idx_height] * block_y;
-    const auto width_crop  = crop_info.left + crop_info.right;
-    const auto height_crop = crop_info.top + crop_info.bottom;
+    unsigned int       new_width   = input[idx_width] * static_cast<unsigned int>(block_x);
+    unsigned int       new_height  = input[idx_height] * static_cast<unsigned int>(block_y);
+    const unsigned int width_crop  = crop_info.left + crop_info.right;
+    const unsigned int height_crop = crop_info.top + crop_info.bottom;
     ARM_COMPUTE_ERROR_ON(new_width <= width_crop);
     ARM_COMPUTE_ERROR_ON(new_height <= height_crop);
     new_width -= width_crop;
@@ -1129,7 +1129,7 @@ inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const
     output_shape.set(idx_width, new_width);
     output_shape.set(idx_height, new_height);
-    output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));
+    output_shape.set(idx_batch, input[idx_batch] / (block_x * block_y));
 
     return output_shape;
 }
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
index 6f333dd925..b47d5a7e38 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -128,8 +128,8 @@ void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_contex
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
-    TensorShape output_shape = compute_batch_to_space_shape(input->info(), block_shape_x, block_shape_y);
-    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+    const TensorShape output_shape = compute_batch_to_space_shape(input->info()->data_layout(), input->info()->tensor_shape(), block_shape_x, block_shape_y);
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
 
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info()));
diff --git a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
index 10207b9cf6..84c727df73 100644
--- a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -103,7 +103,7 @@ void NEBatchToSpaceLayerKernel::configure(const ITensor *input, const ITensor *b
 void NEBatchToSpaceLayerKernel::configure(const ITensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ITensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    TensorShape output_shape = compute_batch_to_space_shape(input->info(), block_shape_x, block_shape_y);
+    const TensorShape output_shape = compute_batch_to_space_shape(input->info()->data_layout(), input->info()->tensor_shape(), block_shape_x, block_shape_y);
 
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
diff --git a/tests/datasets/BatchToSpaceDataset.h b/tests/datasets/BatchToSpaceDataset.h
index 1edd457aad..2670af50df 100644
--- a/tests/datasets/BatchToSpaceDataset.h
+++ b/tests/datasets/BatchToSpaceDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,15 +38,17 @@ namespace datasets
 class BatchToSpaceLayerDataset
 {
 public:
-    using type = std::tuple<TensorShape, TensorShape, TensorShape>;
+    using type = std::tuple<TensorShape, std::vector<int32_t>, CropInfo, TensorShape>;
 
     struct iterator
     {
-        iterator(std::vector<TensorShape>::const_iterator src_it,
-                 std::vector<TensorShape>::const_iterator block_shape_it,
-                 std::vector<TensorShape>::const_iterator dst_it)
+        iterator(std::vector<TensorShape>::const_iterator          src_it,
+                 std::vector<std::vector<int32_t>>::const_iterator block_shape_it,
+                 std::vector<CropInfo>::const_iterator             crop_info_it,
+                 std::vector<TensorShape>::const_iterator          dst_it)
             : _src_it{ std::move(src_it) },
               _block_shape_it{ std::move(block_shape_it) },
+              _crop_info_it{ std::move(crop_info_it) },
               _dst_it{ std::move(dst_it) }
         {
         }
@@ -56,44 +58,48 @@ public:
             std::stringstream description;
             description << "In=" << *_src_it << ":";
             description << "BlockShape=" << *_block_shape_it << ":";
+            description << "CropInfo=" << *_crop_info_it << ":";
             description << "Out=" << *_dst_it;
             return description.str();
         }
 
         BatchToSpaceLayerDataset::type operator*() const
         {
-            return std::make_tuple(*_src_it, *_block_shape_it, *_dst_it);
+            return std::make_tuple(*_src_it, *_block_shape_it, *_crop_info_it, *_dst_it);
         }
 
         iterator &operator++()
        {
             ++_src_it;
             ++_block_shape_it;
+            ++_crop_info_it;
             ++_dst_it;
             return *this;
         }
 
     private:
-        std::vector<TensorShape>::const_iterator _src_it;
-        std::vector<TensorShape>::const_iterator _block_shape_it;
-        std::vector<TensorShape>::const_iterator _dst_it;
+        std::vector<TensorShape>::const_iterator          _src_it;
+        std::vector<std::vector<int32_t>>::const_iterator _block_shape_it;
+        std::vector<CropInfo>::const_iterator             _crop_info_it;
+        std::vector<TensorShape>::const_iterator          _dst_it;
     };
 
     iterator begin() const
     {
-        return iterator(_src_shapes.begin(), _block_shape_shapes.begin(), _dst_shapes.begin());
+        return iterator(_src_shapes.begin(), _block_shapes.begin(), _crop_infos.begin(), _dst_shapes.begin());
     }
 
     int size() const
     {
-        return std::min(_src_shapes.size(), std::min(_block_shape_shapes.size(), _dst_shapes.size()));
+        return std::min(std::min(std::min(_src_shapes.size(), _block_shapes.size()), _crop_infos.size()), _dst_shapes.size());
     }
 
-    void add_config(TensorShape src, TensorShape block_shape, TensorShape dst)
+    void add_config(const TensorShape &src, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &dst)
     {
         _src_shapes.emplace_back(std::move(src));
-        _block_shape_shapes.emplace_back(std::move(block_shape));
+        _block_shapes.emplace_back(std::move(block_shape));
+        _crop_infos.emplace_back(std::move(crop_info));
         _dst_shapes.emplace_back(std::move(dst));
     }
 
@@ -102,35 +108,60 @@ protected:
     BatchToSpaceLayerDataset(BatchToSpaceLayerDataset &&) = default;
 
 private:
-    std::vector<TensorShape> _src_shapes{};
-    std::vector<TensorShape> _block_shape_shapes{};
-    std::vector<TensorShape> _dst_shapes{};
+    std::vector<TensorShape>          _src_shapes{};
+    std::vector<std::vector<int32_t>> _block_shapes{};
+    std::vector<CropInfo>             _crop_infos{};
+    std::vector<TensorShape>          _dst_shapes{};
 };
 
+/** Follow NCHW data layout across all datasets. I.e.
+ *  TensorShape(Width(X), Height(Y), Channel(Z), Batch(W))
+ */
+
 class SmallBatchToSpaceLayerDataset final : public BatchToSpaceLayerDataset
 {
 public:
     SmallBatchToSpaceLayerDataset()
     {
-        add_config(TensorShape(1U, 1U, 1U, 4U), TensorShape(2U), TensorShape(2U, 2U, 1U, 1U));
-        add_config(TensorShape(3U, 1U, 1U, 4U), TensorShape(2U), TensorShape(6U, 2U, 1U, 1U));
-        add_config(TensorShape(1U, 2U, 2U, 4U), TensorShape(2U), TensorShape(2U, 4U, 2U, 1U));
-        add_config(TensorShape(1U, 3U, 1U, 8U), TensorShape(2U), TensorShape(2U, 6U, 1U, 2U));
-        add_config(TensorShape(3U, 4U, 1U, 4U), TensorShape(2U), TensorShape(6U, 8U, 1U, 1U));
-        add_config(TensorShape(1U, 1U, 1U, 8U), TensorShape(4U, 2U), TensorShape(4U, 2U, 1U, 1U));
-        add_config(TensorShape(3U, 1U, 1U, 8U), TensorShape(2U, 4U), TensorShape(6U, 4U, 1U, 1U));
+        // Block size = 1 (effectively no batch to space)
+        add_config(TensorShape(1U, 1U, 1U, 4U), { 1U, 1U }, CropInfo(), TensorShape(1U, 1U, 1U, 4U));
+        add_config(TensorShape(8U, 2U, 4U, 3U), { 1U, 1U }, CropInfo(), TensorShape(8U, 2U, 4U, 3U));
+        // Same block size in both x and y
+        add_config(TensorShape(3U, 2U, 1U, 4U), { 2U, 2U }, CropInfo(), TensorShape(6U, 4U, 1U, 1U));
+        add_config(TensorShape(1U, 3U, 2U, 9U), { 3U, 3U }, CropInfo(), TensorShape(3U, 9U, 2U, 1U));
+        // Different block size in x and y
+        add_config(TensorShape(5U, 7U, 7U, 4U), { 2U, 1U }, CropInfo(), TensorShape(10U, 7U, 7U, 2U));
+        add_config(TensorShape(3U, 3U, 1U, 8U), { 1U, 2U }, CropInfo(), TensorShape(3U, 6U, 1U, 4U));
+        add_config(TensorShape(5U, 2U, 2U, 6U), { 3U, 2U }, CropInfo(), TensorShape(15U, 4U, 2U, 1U));
     }
 };
+
+/** Relatively small shapes that are still large enough to leave room for testing cropping of the output shape
+ */
+class SmallBatchToSpaceLayerWithCroppingDataset final : public BatchToSpaceLayerDataset
+{
+public:
+    SmallBatchToSpaceLayerWithCroppingDataset()
+    {
+        // Crop in both dims
+        add_config(TensorShape(5U, 3U, 2U, 8U), { 2U, 2U }, CropInfo(1U, 1U, 2U, 1U), TensorShape(8U, 3U, 2U, 2U));
+        // Left crop in x dim
+        add_config(TensorShape(1U, 1U, 1U, 20U), { 4U, 5U }, CropInfo(2U, 1U, 0U, 2U), TensorShape(1U, 3U, 1U, 1U));
+        // Left crop in y dim
+        add_config(TensorShape(3U, 1U, 1U, 8U), { 2U, 4U }, CropInfo(0U, 0U, 2U, 1U), TensorShape(6U, 1U, 1U, 1U));
+    }
+};
 class LargeBatchToSpaceLayerDataset final : public BatchToSpaceLayerDataset
 {
 public:
     LargeBatchToSpaceLayerDataset()
     {
-        add_config(TensorShape(64U, 32U, 2U, 4U), TensorShape(2U), TensorShape(128U, 64U, 2U, 1U));
-        add_config(TensorShape(128U, 16U, 2U, 16U), TensorShape(2U), TensorShape(512U, 64U, 2U, 1U));
-        add_config(TensorShape(16U, 8U, 2U, 8U), TensorShape(4U, 2U), TensorShape(64U, 16U, 2U, 1U));
-        add_config(TensorShape(8U, 16U, 2U, 8U), TensorShape(2U, 4U), TensorShape(16U, 64U, 2U, 1U));
+        // Same block size in both x and y
+        add_config(TensorShape(64U, 32U, 2U, 4U), { 2U, 2U }, CropInfo(), TensorShape(128U, 64U, 2U, 1U));
+        add_config(TensorShape(128U, 16U, 2U, 18U), { 3U, 3U }, CropInfo(), TensorShape(384U, 48U, 2U, 2U));
+        // Different block size in x and y
+        add_config(TensorShape(16U, 8U, 2U, 8U), { 4U, 1U }, CropInfo(), TensorShape(64U, 8U, 2U, 2U));
+        add_config(TensorShape(8U, 16U, 2U, 8U), { 2U, 4U }, CropInfo(), TensorShape(16U, 64U, 2U, 1U));
     }
 };
 } // namespace datasets
diff --git a/tests/validation/fixtures/BatchToSpaceLayerFixture.h b/tests/validation/fixtures/BatchToSpaceLayerFixture.h
index 5a23261a6e..19fc82a87b 100644
--- a/tests/validation/fixtures/BatchToSpaceLayerFixture.h
+++ b/tests/validation/fixtures/BatchToSpaceLayerFixture.h
@@ -24,6 +24,7 @@
 #ifndef ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE
 #define ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE
 
+#include "arm_compute/core/Helpers.h"
 #include "tests/Globals.h"
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Fixture.h"
@@ -36,14 +37,14 @@ namespace test
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class BatchToSpaceLayerValidationGenericFixture : public framework::Fixture
+class BatchToSpaceLayerValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout, const CropInfo &crop_info = CropInfo{})
+    void setup(const TensorShape &input_shape, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &output_shape, DataType data_type, DataLayout data_layout)
     {
-        _target    = compute_target(input_shape, block_shape_shape, output_shape, data_type, data_layout, crop_info);
-        _reference = compute_reference(input_shape, block_shape_shape, output_shape, data_type, crop_info);
+        _target    = compute_target(input_shape, block_shape, crop_info, output_shape, data_type, data_layout);
+        _reference = compute_reference(input_shape, block_shape, crop_info, output_shape, data_type);
     }
 
 protected:
@@ -56,9 +57,10 @@ protected:
         DistributionType distribution{ T(-1.0f), T(1.0f) };
         library->fill(tensor, distribution, i);
     }
-    TensorType compute_target(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape,
-                              DataType data_type, DataLayout data_layout, const CropInfo &crop_info)
+    TensorType compute_target(TensorShape input_shape, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, TensorShape output_shape,
+                              DataType data_type, DataLayout data_layout)
     {
+        ARM_COMPUTE_ERROR_ON(block_shape.size() != 2U); // Only support batch to 2D space (x, y) for now
         if(data_layout == DataLayout::NHWC)
         {
             permute(input_shape, PermutationVector(2U, 0U, 1U));
@@ -66,75 +68,49 @@
         }
 
         // Create tensors
-        TensorType input       = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
-        TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
-        TensorType output      = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType input  = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
 
         // Create and configure function
         FunctionType batch_to_space;
-        batch_to_space.configure(&input, &block_shape, &output, crop_info);
+        batch_to_space.configure(&input, block_shape.at(0), block_shape.at(1), &output, crop_info);
 
         ARM_COMPUTE_ASSERT(input.info()->is_resizable());
-        ARM_COMPUTE_ASSERT(block_shape.info()->is_resizable());
         ARM_COMPUTE_ASSERT(output.info()->is_resizable());
 
         // Allocate tensors
         input.allocator()->allocate();
-        block_shape.allocator()->allocate();
         output.allocator()->allocate();
 
         ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
-        ARM_COMPUTE_ASSERT(!block_shape.info()->is_resizable());
         ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
 
         // Fill tensors
         fill(AccessorType(input), 0);
-        {
-            auto      block_shape_data = AccessorType(block_shape);
-            const int idx_width        = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
-            for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
-            {
-                static_cast<int32_t *>(block_shape_data.data())[i] = output_shape[i + idx_width] / input_shape[i + idx_width];
-            }
-        }
 
         // Compute function
         batch_to_space.run();
 
         return output;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape,
-                                      const TensorShape &output_shape, DataType data_type, const CropInfo &crop_info)
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const std::vector<int32_t> &block_shape,
+                                      const CropInfo &crop_info, const TensorShape &output_shape, DataType data_type)
     {
+        ARM_COMPUTE_ERROR_ON(block_shape.size() != 2U); // Only support batch to 2D space (x, y) for now
         // Create reference
-        SimpleTensor<T>       input{ input_shape, data_type };
-        SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 };
+        SimpleTensor<T> input{ input_shape, data_type };
 
         // Fill reference
         fill(input, 0);
-        for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
-        {
-            block_shape[i] = output_shape[i] / input_shape[i];
-        }
 
         // Compute reference
-        return reference::batch_to_space(input, block_shape, output_shape, crop_info);
+        return reference::batch_to_space(input, block_shape, crop_info, output_shape);
     }
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class BatchToSpaceLayerValidationFixture : public BatchToSpaceLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
-    template <typename...>
-    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
-    {
-        BatchToSpaceLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, block_shape_shape, output_shape, data_type, data_layout, CropInfo{});
-    }
-};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/BatchToSpaceLayer.cpp b/tests/validation/reference/BatchToSpaceLayer.cpp
index aeda733bb6..63d121f59b 100644
--- a/tests/validation/reference/BatchToSpaceLayer.cpp
+++ b/tests/validation/reference/BatchToSpaceLayer.cpp
@@ -23,8 +23,10 @@
  */
 #include "BatchToSpaceLayer.h"
 
+#include "arm_compute/core/Validate.h"
 #include "tests/validation/Helpers.h"
 
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 namespace arm_compute
 {
 namespace test
@@ -35,18 +37,20 @@ namespace reference
 {
 // Batch to Space
 template <typename T>
-SimpleTensor<T> batch_to_space(const SimpleTensor<T> &src, const SimpleTensor<int32_t> &block_shape, const TensorShape &dst_shape, const CropInfo &crop_info)
+SimpleTensor<T> batch_to_space(const SimpleTensor<T> &src, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &dst_shape)
 {
-    ARM_COMPUTE_ERROR_ON(block_shape[0] <= 0);
-    ARM_COMPUTE_ERROR_ON(block_shape[1] <= 0);
+    ARM_COMPUTE_ERROR_ON(block_shape[0] < 1);
+    ARM_COMPUTE_ERROR_ON(block_shape[1] < 1);
+    const auto expected_dst_shape = misc::shape_calculator::compute_batch_to_space_shape(DataLayout::NCHW, src.shape(), block_shape[0], block_shape[1], crop_info);
+    ARM_COMPUTE_ERROR_ON(arm_compute::detail::have_different_dimensions(expected_dst_shape, dst_shape, 0));
+    ARM_COMPUTE_UNUSED(expected_dst_shape);
+
     SimpleTensor<T> result(dst_shape, src.data_type());
     int             out_pos    = 0;
     const auto      width_out  = static_cast<int>(dst_shape[0]);
     const auto      height_out = static_cast<int>(dst_shape[1]);
     const auto      z_out      = static_cast<int>(dst_shape[2]);
     const auto      batch_out  = static_cast<int>(dst_shape[3]);
-    ARM_COMPUTE_ERROR_ON(width_out <= static_cast<int>(crop_info.left + crop_info.right));
-    ARM_COMPUTE_ERROR_ON(height_out <= static_cast<int>(crop_info.top + crop_info.bottom));
 
     for(int batch = 0; batch < batch_out; ++batch)
     {
@@ -71,8 +75,8 @@ SimpleTensor<T> batch_to_space(const SimpleTensor<T> &src, const SimpleTensor<int32_t> &block_shape, const TensorShape &dst_shape, const CropInfo &crop_info)
 
     return result;
 }
-template SimpleTensor<float> batch_to_space(const SimpleTensor<float> &src, const SimpleTensor<int32_t> &block_shape, const TensorShape &dst_shape, const CropInfo &crop_info = CropInfo{});
-template SimpleTensor<half> batch_to_space(const SimpleTensor<half> &src, const SimpleTensor<int32_t> &block_shape, const TensorShape &dst_shape, const CropInfo &crop_info = CropInfo{});
+template SimpleTensor<float> batch_to_space(const SimpleTensor<float> &src, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &dst_shape);
+template SimpleTensor<half> batch_to_space(const SimpleTensor<half> &src, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &dst_shape);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/BatchToSpaceLayer.h b/tests/validation/reference/BatchToSpaceLayer.h
index 18010f1885..a37bfc3373 100644
--- a/tests/validation/reference/BatchToSpaceLayer.h
+++ b/tests/validation/reference/BatchToSpaceLayer.h
@@ -37,7 +37,7 @@ namespace validation
 namespace reference
 {
 template <typename T>
-SimpleTensor<T> batch_to_space(const SimpleTensor<T> &src, const SimpleTensor<int32_t> &block_shape, const TensorShape &dst_shape, const CropInfo &crop_info = CropInfo{});
+SimpleTensor<T> batch_to_space(const SimpleTensor<T> &src, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &dst_shape);
 } // namespace reference
 } // namespace validation
 } // namespace test
-- 
cgit v1.2.1