author    George Wort <george.wort@arm.com>    2019-02-15 15:12:52 +0000
committer Manuel Bottini <manuel.bottini@arm.com>    2019-03-13 13:54:10 +0000
commit    894066de8cc26d1a3aca62dcaa6b30a2a1116028 (patch)
tree      9dcb227018ea69fcfb83f7b25be2009fdd16e18e
parent    adfb2737046028c042f0aecaff87733a442da29f (diff)
COMPMID-1844: Implement CLCrop
Change-Id: I8822c37adc45960705dc3f32a53214795ba3cf39
Signed-off-by: George Wort <george.wort@arm.com>
Reviewed-on: https://review.mlplatform.org/c/789
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
-rw-r--r--  arm_compute/core/CL/CLKernels.h                   |   1
-rw-r--r--  arm_compute/core/CL/kernels/CLCopyKernel.h        |  22
-rw-r--r--  arm_compute/core/CL/kernels/CLCropKernel.h        |  88
-rw-r--r--  arm_compute/core/CL/kernels/CLMemsetKernel.h      |   9
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h              |   1
-rw-r--r--  arm_compute/runtime/CL/functions/CLCropResize.h   | 114
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                   |   5
-rw-r--r--  src/core/CL/cl_kernels/copy_tensor.cl             |  17
-rw-r--r--  src/core/CL/cl_kernels/crop_tensor.cl             |  96
-rw-r--r--  src/core/CL/cl_kernels/memset.cl                  |  15
-rw-r--r--  src/core/CL/kernels/CLCopyKernel.cpp              | 122
-rw-r--r--  src/core/CL/kernels/CLCropKernel.cpp              | 132
-rw-r--r--  src/core/CL/kernels/CLMemsetKernel.cpp            |  47
-rw-r--r--  src/runtime/CL/functions/CLCropResize.cpp         | 272
-rw-r--r--  tests/validation/CL/CropResize.cpp                | 184
15 files changed, 1059 insertions(+), 66 deletions(-)
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index e0d124bff9..396158ff2d 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -46,6 +46,7 @@
#include "arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h"
#include "arm_compute/core/CL/kernels/CLConvolutionKernel.h"
#include "arm_compute/core/CL/kernels/CLCopyKernel.h"
+#include "arm_compute/core/CL/kernels/CLCropKernel.h"
#include "arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLCopyKernel.h b/arm_compute/core/CL/kernels/CLCopyKernel.h
index 9fc3759f4e..464fa13791 100644
--- a/arm_compute/core/CL/kernels/CLCopyKernel.h
+++ b/arm_compute/core/CL/kernels/CLCopyKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,20 +47,22 @@ public:
CLCopyKernel &operator=(CLCopyKernel &&) = default;
/** Initialize the kernel's input, output.
*
- * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
- * @param[out] output Destination tensor. Data types supported: same as @p input.
- * @param[in] padding (Optional) Padding to be applied to the input tensor
+ * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] padding (Optional) Padding to be applied to the input tensor
+ * @param[in] output_window (Optional) Window to be used when copying into only a part of the output tensor. Default is nullptr.
*/
- void configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList());
+ void configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLCopyKernel
*
- * @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
- * @param[in] output Destination tensor info. Data types supported: same as @p input.
- * @param[in] padding (Optional) Padding to be applied to the input tensor
+ * @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] output Destination tensor info. Data types supported: same as @p input.
+ * @param[in] padding (Optional) Padding to be applied to the input tensor
+ * @param[in] output_window (Optional) Window to be used when copying into only a part of the output tensor. Default is nullptr.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList());
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -68,6 +70,8 @@ public:
private:
const ICLTensor *_input;
ICLTensor *_output;
+ Window _output_window;
+ bool _has_output_window;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_CLCOPYKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLCropKernel.h b/arm_compute/core/CL/kernels/CLCropKernel.h
new file mode 100644
index 0000000000..ff4b13f865
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLCropKernel.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLCROPKERNEL_H__
+#define __ARM_COMPUTE_CLCROPKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** OpenCL kernel to crop a region of a tensor, converting the output to F32 */
+class CLCropKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLCropKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers). */
+ CLCropKernel(const CLCropKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers). */
+ CLCropKernel &operator=(const CLCropKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLCropKernel(CLCropKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLCropKernel &operator=(CLCropKernel &&) = default;
+ /** Configure kernel
+ *
+ * @note Supported tensor rank: up to 4
+ *
+ * @param[in] input Source tensor. Data type supported: U16/S16/U32/S32/F16/F32. Data layouts supported: NHWC.
+ * @param[out] output Destination tensor. Data type supported: F32
+ * @param[in] start Coordinates of where to start cropping the image.
+ * @param[in] end Coordinates of where to end cropping the image.
+ * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p input.
+ * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
+ * @param[in] output_window Output window to be used when the cropped image is copied into only a part of the output tensor. Default is nullptr.
+ */
+ void configure(const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *output_window = nullptr);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLCropKernel
+ *
+ * @note Supported tensor rank: up to 4
+ *
+ * @param[in] input Source tensor info. Data type supported: U16/S16/U32/S32/F16/F32. Data layouts supported: NHWC.
+ * @param[in] output Destination tensor info. Data type supported: F32
+ * @param[in] start Coordinates of where to start cropping the image.
+ * @param[in] end Coordinates of where to end cropping the image.
+ * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p input.
+ * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
+ * @param[in] output_window Output window to be used when the cropped image is copied into only a part of the output tensor. Default is nullptr.
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
+ Window *output_window = nullptr);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+ Coordinates2D _start;
+ uint32_t _batch_index;
+ float _extrapolation_value;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLCROPKERNEL_H__ */
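For orientation, a minimal host-side sketch of driving this kernel directly (not part of the patch). The tensor names, the 20x20 region and batch 0 are illustrative, and crop_out is assumed to be an F32 NHWC tensor already shaped to hold the cropped region:

    // Sketch only: crop columns 10..29 and rows 30..49 (both ends inclusive) of batch 0
    // from an NHWC input into crop_out. Swapping start/end flips the cropped region.
    CLCropKernel crop_kernel;
    crop_kernel.configure(&input, &crop_out, Coordinates2D{ 10, 30 }, Coordinates2D{ 29, 49 },
                          0 /* batch_index */, 0.f /* extrapolation_value */);
    CLScheduler::get().enqueue(crop_kernel);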
diff --git a/arm_compute/core/CL/kernels/CLMemsetKernel.h b/arm_compute/core/CL/kernels/CLMemsetKernel.h
index e35a9f1cf2..c79b6f4d60 100644
--- a/arm_compute/core/CL/kernels/CLMemsetKernel.h
+++ b/arm_compute/core/CL/kernels/CLMemsetKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,22 +53,25 @@ public:
*
* @param[in,out] tensor Input tensor to fill. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] constant_value The value used to fill the planes of the tensor
+ * @param[in] window Window to be used when setting only part of a tensor. Default is nullptr.
*/
- void configure(ICLTensor *tensor, const PixelValue &constant_value);
+ void configure(ICLTensor *tensor, const PixelValue &constant_value, Window *window = nullptr);
/** Static function to check if given info will lead to a valid configuration of @ref CLMemsetKernel
*
* @param[in] tensor Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] constant_value The value used to fill the planes of the tensor
+ * @param[in] window Window to be used when setting only part of a tensor. Default is nullptr.
*
* @return a status
*/
- static Status validate(const ITensorInfo *tensor, const PixelValue &constant_value);
+ static Status validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
private:
ICLTensor *_tensor;
+ Window _full_window;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_CLMEMSETRKERNEL_H__ */
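A short sketch of the new optional window parameter (not part of the patch); the tensor name and the five-row fill are illustrative:

    // Sketch only: fill just the region with y in [0, 5) of an already-allocated tensor
    // with -1, leaving the rest of the tensor untouched.
    Window fill_win = calculate_max_window(*tensor.info());
    fill_win.set(Window::DimY, Window::Dimension(0, 5, 1));
    CLMemsetKernel memset_kernel;
    memset_kernel.configure(&tensor, PixelValue(-1.f), &fill_win);
    CLScheduler::get().enqueue(memset_kernel);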
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 686d266557..42897a6e23 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -50,6 +50,7 @@
#include "arm_compute/runtime/CL/functions/CLConvolution.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLCopy.h"
+#include "arm_compute/runtime/CL/functions/CLCropResize.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
#include "arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLCropResize.h b/arm_compute/runtime/CL/functions/CLCropResize.h
new file mode 100644
index 0000000000..d6c9fed928
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLCropResize.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CL_CROP_RESIZE_H__
+#define __ARM_COMPUTE_CL_CROP_RESIZE_H__
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/kernels/CLCopyKernel.h"
+#include "arm_compute/core/CL/kernels/CLCropKernel.h"
+#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLScale.h"
+
+#include <cstdint>
+#include <memory>
+
+namespace arm_compute
+{
+// Forward Declarations
+class ITensor;
+
+/** Function to perform cropping and resizing */
+class CLCropResize : public IFunction
+{
+public:
+ /** Default constructor */
+ CLCropResize();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLCropResize(const CLCropResize &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLCropResize &operator=(const CLCropResize &) = delete;
+ /** Allow instances of this class to be moved */
+ CLCropResize(CLCropResize &&) = default;
+ /** Allow instances of this class to be moved */
+ CLCropResize &operator=(CLCropResize &&) = default;
+ /** Default destructor */
+ virtual ~CLCropResize() = default;
+
+ /** Configure kernel
+ *
+ * @note Supported tensor rank: up to 4
+ * @note Box indices may be outside of the bounds, in which case @p extrapolation_value is used.
+ * @note Start and end indices of boxes are inclusive.
+ *
+ * @param[in] input Source tensor containing N batches of 3D images to be cropped. Data type supported: U16/S16/U32/S32/F16/F32
+ * @param[in] boxes Tensor containing the boxes used to crop the images. Data type supported: F32
+ * @param[in] box_ind One dimensional tensor containing the batch index of the 3D image in @p input that the corresponding
+ * box in @p boxes will be applied to. Data type supported: F32
+ * @param[out] output Destination tensor containing a cropped and resized image for each box in @p boxes. Data type supported: F32
+ * @param[in] crop_size The dimensions that each cropped image will be resized to.
+ * @param[in] method The policy to be used when resizing the image. Default is bilinear.
+ * @param[in] extrapolation_value Value to be used for values outside of the image for cropping and resizing. Default is 0.
+ */
+ void configure(const ICLTensor *input, ICLTensor *boxes, ICLTensor *box_ind, ICLTensor *output, Coordinates2D crop_size,
+ InterpolationPolicy method = InterpolationPolicy::BILINEAR, float extrapolation_value = 0);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLCropResize
+ *
+ * @note Supported tensor rank: up to 4
+ * @note Box indices may be outside of the bounds, in which case @p extrapolation_value is used.
+ * @note Start and end indices of boxes are inclusive.
+ *
+ * @param[in] input Source tensor info containing N batches of 3D images to be cropped. Data type supported: U16/S16/U32/S32/F16/F32
+ * @param[in] boxes Tensor info for the tensor containing the boxes used to crop the images. Data type supported: F32
+ * @param[in] box_ind Tensor info for the one dimensional tensor containing the batch index of the 3D image in @p input
+ * that the corresponding box in @p boxes will be applied to. Data type supported: F32
+ * @param[in] output Tensor info for the destination tensor containing a cropped and resized image for each box in @p boxes.
+ * Data type supported: F32
+ * @param[in] crop_size The dimensions that each cropped image will be resized to.
+ * @param[in] method The policy to be used when resizing the image. Default is bilinear.
+ * @param[in] extrapolation_value Value to be used for values outside of the image for cropping and resizing. Default is 0.
+ *
+ * @return A status
+ */
+ static Status validate(const ITensorInfo *input, ITensorInfo *boxes, ITensorInfo *box_ind, const ITensorInfo *output,
+ Coordinates2D crop_size, InterpolationPolicy method, float extrapolation_value);
+
+ void run() override;
+
+ const ICLTensor *_input;
+ ICLTensor *_boxes;
+ ICLTensor *_box_ind;
+ ICLTensor *_output;
+ size_t _num_boxes;
+ InterpolationPolicy _method;
+ float _extrapolation_value;
+
+ std::unique_ptr<CLScale[]> _scale;
+ std::unique_ptr<CLCopyKernel[]> _copy;
+ std::unique_ptr<CLTensor[]> _crop_results{ nullptr };
+ std::unique_ptr<CLTensor[]> _scaled_results{ nullptr };
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CL_CROP_RESIZE_H__ */
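A minimal usage sketch of the function declared above (illustrative only, not part of the patch). It assumes the CL backend has already been initialised via CLScheduler::get().default_init(); the shapes, the single crop box and the 64x64 crop size are made up, and the caller shapes the output to (channels, crop_width, crop_height, num_boxes) up front:

    // Sketch only: all names, shapes and values are illustrative.
    CLTensor input, boxes, box_ind, output;

    TensorInfo in_info(TensorShape(3U, 100U, 100U, 2U), 1, DataType::F32); // 2 batches of 100x100 3-channel NHWC images
    in_info.set_data_layout(DataLayout::NHWC);
    input.allocator()->init(in_info);

    boxes.allocator()->init(TensorInfo(TensorShape(4U, 1U), 1, DataType::F32)); // one box: normalised [y0, x0, y1, x1]
    box_ind.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::F32));   // batch index the box applies to

    TensorInfo out_info(TensorShape(3U, 64U, 64U, 1U), 1, DataType::F32); // one cropped and resized 64x64 image
    out_info.set_data_layout(DataLayout::NHWC);
    output.allocator()->init(out_info);

    CLCropResize crop_resize;
    crop_resize.configure(&input, &boxes, &box_ind, &output, Coordinates2D{ 64, 64 },
                          InterpolationPolicy::BILINEAR, 0.f /* extrapolation_value */);

    input.allocator()->allocate();
    boxes.allocator()->allocate();
    box_ind.allocator()->allocate();
    output.allocator()->allocate();
    // ... fill input, boxes and box_ind (e.g. via map()/unmap()) before calling run() ...
    crop_resize.run();

Note that configure() only records the parameters; as the run() implementation in this patch shows, the per-box crop, scale and copy stages are configured inside run(), once the box contents can be read.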
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index b7973efa9d..5af8a09723 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -212,6 +212,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "copy_plane", "channel_extract.cl" },
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
+ { "crop_tensor", "crop_tensor.cl" },
{ "deconvolution_upsample", "deconvolution_layer.cl" },
{ "depthwise_convolution_3x3", "depthwise_convolution.cl" },
{ "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
@@ -609,6 +610,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/copy_tensor.clembed"
},
{
+ "crop_tensor.cl",
+#include "./cl_kernels/crop_tensor.clembed"
+ },
+ {
"upsample_layer.cl",
#include "./cl_kernels/upsample_layer.clembed"
},
diff --git a/src/core/CL/cl_kernels/copy_tensor.cl b/src/core/CL/cl_kernels/copy_tensor.cl
index 4bbbf11bea..f4366b889a 100644
--- a/src/core/CL/cl_kernels/copy_tensor.cl
+++ b/src/core/CL/cl_kernels/copy_tensor.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -77,6 +77,7 @@ __kernel void copy_pad_tensor(
}
#endif // Compile time constants
+#if defined(DATA_TYPE)
/** Performs a copy of input tensor to the output tensor.
*
* @param[in] in_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
@@ -103,6 +104,16 @@ __kernel void copy_tensor(
Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(in);
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+#if defined(VEC_SIZE)
+
+#if defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does then shift access vector to access elements within bounds
+ const int shift = max((int)(get_global_id(0) * VEC_SIZE) - (int)LAST_ACCESSED_X, 0);
+ in.ptr -= shift * in.stride_x;
+ out.ptr -= shift * out.stride_x;
+#endif // defined(LAST_ACCESSED_X)
+
// Load data
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
@@ -110,4 +121,8 @@ __kernel void copy_tensor(
// Store result
VSTORE(VEC_SIZE)
(data, 0, (__global DATA_TYPE *)out.ptr);
+#else // defined(VEC_SIZE)
+ *((__global DATA_TYPE *)(out.ptr)) = *((__global DATA_TYPE *)(in.ptr));
+#endif // defined(VEC_SIZE)
}
+#endif // defined(DATA_TYPE)
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/crop_tensor.cl b/src/core/CL/cl_kernels/crop_tensor.cl
new file mode 100644
index 0000000000..55f8544a10
--- /dev/null
+++ b/src/core/CL/cl_kernels/crop_tensor.cl
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) // Compile time constants
+
+/** Performs a crop of the input tensor into the output tensor.
+ *
+ * @param[in] in_ptr Pointer to the source tensor. Supported data types: U16/S16/F16/U32/S32/F32
+ * @param[in] in_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] in_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] in_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] in_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] in_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] out_ptr Pointer to the destination tensor. Supported data types: same as @p in_ptr
+ * @param[in] out_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] out_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] out_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] out_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] out_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] out_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] in_offset_y The initial offset of the input address along Y.
+ * @param[in] in_offset_z The initial offset of the input address along Z.
+ */
+__kernel void crop_tensor(
+ TENSOR3D_DECLARATION(in),
+ TENSOR3D_DECLARATION(out),
+ int in_offset_y,
+ int in_offset_z)
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(in);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+ const int in_x = get_global_id(0) * (in_step_x / in_stride_x);
+
+#if defined(WIDTH_FLIPPED)
+ const int in_y = in_offset_y - get_global_id(1);
+#else // defined(WIDTH_FLIPPED)
+ const int in_y = in_offset_y + get_global_id(1);
+#endif // defined(WIDTH_FLIPPED)
+
+#if defined(HEIGHT_FLIPPED)
+ const int in_z = in_offset_z - get_global_id(2);
+#else // defined(HEIGHT_FLIPPED)
+ const int in_z = in_offset_z + get_global_id(2);
+#endif // defined(HEIGHT_FLIPPED)
+
+#if defined(VEC_SIZE)
+
+#if defined(LAST_ACCESSED_X)
+ // Check if access on width gets out of bounds
+ // If it does then shift access vector to access elements within bounds
+ const int shift = max((int)(get_global_id(0) * VEC_SIZE) - (int)LAST_ACCESSED_X, 0);
+ in.ptr -= shift * in.stride_x;
+ out.ptr -= shift * out.stride_x;
+#endif // defined(LAST_ACCESSED_X)
+
+ __global const uchar *input_addr = tensor3D_offset(&in, in_x, in_y, in_z);
+
+ // Load data
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr);
+
+ // Store result
+ VSTORE(VEC_SIZE)
+ (CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)), 0, (__global float *)out.ptr);
+#else // defined(VEC_SIZE)
+ *((__global float *)(out.ptr)) = CONVERT(*((__global DATA_TYPE *)tensor3D_offset(&in, in_x, in_y, in_z)), float);
+#endif // defined(VEC_SIZE)
+}
+
+#endif // defined(DATA_TYPE)
\ No newline at end of file
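For reference, the compile-time definitions that select the code paths above are emitted host-side by CLCropKernel::configure() in this patch. As an assumed example, an F16 input whose output rows are 10 elements wide (vector size 4, leaving a remainder of 2) would be built with roughly:

    -DDATA_TYPE=half -DVEC_SIZE=4 -DLAST_ACCESSED_X=6

with -DWIDTH_FLIPPED= and/or -DHEIGHT_FLIPPED= added when start is greater than end on the corresponding axis.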
diff --git a/src/core/CL/cl_kernels/memset.cl b/src/core/CL/cl_kernels/memset.cl
index 80b34ebdf4..7d8e0ef53f 100644
--- a/src/core/CL/cl_kernels/memset.cl
+++ b/src/core/CL/cl_kernels/memset.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,24 +41,27 @@
* @param[in] value The value used to fill the pages of the tensor
*/
__kernel void memset(
- IMAGE_DECLARATION(tensor))
+ TENSOR3D_DECLARATION(tensor))
{
- Image tensor = CONVERT_TO_IMAGE_STRUCT(tensor);
+ Tensor3D tensor = CONVERT_TO_TENSOR3D_STRUCT(tensor);
-#if defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+#if defined(VEC_SIZE)
+
+#if defined(LAST_ACCESSED_X)
// Check if access on width gets out of bounds
// If it does shift access vector to access elements within bounds
const int xi = (int)(get_global_id(0) * VEC_SIZE);
tensor.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * tensor_stride_x;
+#endif // defined(LAST_ACCESSED_X)
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
data = (DATA_TYPE)(CONSTANT_VALUE);
VSTORE(VEC_SIZE)
(data, 0, (__global DATA_TYPE *)tensor.ptr);
-#else // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
+#else // !defined(VEC_SIZE)
*((__global DATA_TYPE *)(tensor.ptr)) = (DATA_TYPE)(CONSTANT_VALUE);
-#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+#endif // defined(VEC_SIZE)
}
#endif // Check for compile time constants
diff --git a/src/core/CL/kernels/CLCopyKernel.cpp b/src/core/CL/kernels/CLCopyKernel.cpp
index e14e5dafab..30a0b8fcb3 100644
--- a/src/core/CL/kernels/CLCopyKernel.cpp
+++ b/src/core/CL/kernels/CLCopyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,38 +37,58 @@ namespace arm_compute
{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList())
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList(), Window *output_window = nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_ON(!padding.empty() && output_window != nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 4);
// Validate output if initialized
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding), output->tensor_shape());
+ if(output_window == nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding), output->tensor_shape());
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output_window->shape());
+ }
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, Window *output_window)
{
// Output auto inizialitation if not yet initialized
auto_init_if_empty(*output, *input);
// Configure window
- const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
+ const unsigned int vec_size_x = 16 / input->element_size();
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+ if(output_window == nullptr)
+ {
+ // Create and update the window (if needed)
+ Window win = calculate_max_window(*input, Steps(vec_size_x));
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal input_access(input, 0, vec_size_x);
+ AccessWindowHorizontal output_access(output, 0, vec_size_x);
- bool window_changed = update_window_and_padding(win, input_access, output_access);
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+ }
+ else
+ {
+ Window win = calculate_max_window(*input);
+ return std::make_pair(Status{}, win);
+ }
}
std::pair<Status, Window> validate_and_configure_window_with_padding(ITensorInfo *input, ITensorInfo *output, const PaddingList &padding)
@@ -131,14 +151,14 @@ void add_padding_as_build_options(const PaddingList &padding, CLBuildOptions &bu
} // namespace
CLCopyKernel::CLCopyKernel()
- : _input(nullptr), _output(nullptr)
+ : _input(nullptr), _output(nullptr), _output_window(), _has_output_window(false)
{
}
-void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding)
+void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const PaddingList &padding, Window *output_window)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, output_window));
_input = input;
_output = output;
@@ -147,21 +167,44 @@ void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const Pa
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
-
std::pair<Status, Window> win_config;
+ const unsigned int vec_size_x = 16 / input->info()->element_size();
+
if(padding.empty())
{
+ // Configure window
+ win_config = validate_and_configure_window(input->info(), output->info(), output_window);
+
+ if(output_window != nullptr)
+ {
+ _has_output_window = true;
+ _output_window = Window(*output_window);
+ const int width_x = output_window->num_iterations(0);
+ const bool multi_access_x = width_x >= static_cast<int32_t>(vec_size_x);
+ const bool remainder_x = width_x % vec_size_x > 0;
+
+ if(multi_access_x)
+ {
+ _output_window.set(Window::DimX, Window::Dimension(output_window->x().start(), ceil_to_multiple(output_window->x().end(), vec_size_x), vec_size_x));
+ win_config.second.set(Window::DimX, Window::Dimension(win_config.second.x().start(), ceil_to_multiple(win_config.second.x().end(), vec_size_x), vec_size_x));
+ }
+
+ build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(width_x - vec_size_x, 0)));
+ }
+ else
+ {
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ }
+
// Build kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("copy_tensor", build_opts.options()));
-
- // Configure window
- win_config = validate_and_configure_window(input->info(), output->info());
}
else
{
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+
// Add compile time options
add_padding_as_build_options(padding, build_opts);
@@ -185,13 +228,13 @@ void CLCopyKernel::configure(const ICLTensor *input, ICLTensor *output, const Pa
ICLKernel::configure_internal(win_config.second);
}
-Status CLCopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, const PaddingList &padding)
+Status CLCopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, const PaddingList &padding, Window *output_window)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding, output_window));
if(padding.empty())
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), output_window).first);
}
else
{
@@ -206,16 +249,33 @@ void CLCopyKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
+ Window slice;
- do
+ if(_has_output_window)
+ {
+ slice = window.first_slice_window_3D();
+ Window out_slice = _output_window.first_slice_window_3D();
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ add_3D_tensor_argument(idx, _output, out_slice);
+ enqueue(queue, *this, slice);
+ }
+ while(window.slide_window_slice_3D(slice) && _output_window.slide_window_slice_3D(out_slice));
+ }
+ else
{
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice);
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ slice = collapsed.first_slice_window_3D();
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice);
+ }
+ while(collapsed.slide_window_slice_3D(slice));
}
- while(collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLCropKernel.cpp b/src/core/CL/kernels/CLCropKernel.cpp
new file mode 100644
index 0000000000..f8a2456d4a
--- /dev/null
+++ b/src/core/CL/kernels/CLCropKernel.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLCropKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CPP/Validate.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Window.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/helpers/bit_ops.h"
+#include "arm_compute/core/utils/helpers/tensor_transform.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include <map>
+
+namespace arm_compute
+{
+CLCropKernel::CLCropKernel()
+ : _input(nullptr), _output(nullptr), _start(), _batch_index(0), _extrapolation_value(0)
+{
+}
+
+void CLCropKernel::configure(const ICLTensor *input, ICLTensor *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *output_window)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), start, end, batch_index, extrapolation_value, output_window));
+
+ _input = input;
+ _output = output;
+ _start = start;
+ _batch_index = batch_index;
+ _extrapolation_value = extrapolation_value;
+
+ const int vec_size_x = 4;
+ // Create and update the window (if needed)
+ Window win = calculate_max_window(*output->info());
+
+ if(output_window != nullptr)
+ {
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(win, *output_window);
+ win = *output_window;
+ }
+
+ const int output_width_x = win.num_iterations(0);
+ const bool multi_access_x = output_width_x >= vec_size_x;
+ const bool remainder_x = output_width_x % vec_size_x > 0;
+
+ if(multi_access_x)
+ {
+ win.set(Window::DimX,
+ Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+ }
+ ICLKernel::configure_internal(win);
+
+ // Create kernel
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+ build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
+ build_opts.add_option_if(start.x > end.x, "-DWIDTH_FLIPPED=");
+ build_opts.add_option_if(start.y > end.y, "-DHEIGHT_FLIPPED=");
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("crop_tensor", build_opts.options()));
+}
+
+Status CLCropKernel::validate(const ITensorInfo *input, const ITensorInfo *output, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *output_window)
+{
+ ARM_COMPUTE_UNUSED(extrapolation_value, output_window);
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16, DataType::S16, DataType::F16, DataType::U32, DataType::S32, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().num_dimensions() > 4);
+ ARM_COMPUTE_RETURN_ERROR_ON(start.x < 0 || start.y < 0 || end.x < 0 || end.y < 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(start.x >= static_cast<int32_t>(input->dimension(1)) || start.y >= static_cast<int32_t>(input->dimension(2))
+ || end.x >= static_cast<int32_t>(input->dimension(1)) || end.y >= static_cast<int32_t>(input->dimension(2)));
+ ARM_COMPUTE_RETURN_ERROR_ON(batch_index >= input->dimension(3));
+ if(output_window != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(output_window->x().step() != 1);
+ }
+ if(output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 3);
+ }
+ return Status{};
+}
+
+void CLCropKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ Window in_slice = Window();
+ in_slice.use_tensor_dimensions(_input->info()->tensor_shape());
+ in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), ceil_to_multiple(in_slice.x().end(), window.x().step()), window.x().step()));
+ in_slice.set(3, Window::Dimension(_batch_index, _batch_index + 1, 1));
+
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, in_slice);
+ add_3D_tensor_argument(idx, _output, window);
+ add_argument(idx, _start.x);
+ add_argument(idx, _start.y);
+ enqueue(queue, *this, window);
+}
+} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLMemsetKernel.cpp b/src/core/CL/kernels/CLMemsetKernel.cpp
index ab53897543..80caf9406e 100644
--- a/src/core/CL/kernels/CLMemsetKernel.cpp
+++ b/src/core/CL/kernels/CLMemsetKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,27 +35,38 @@
namespace arm_compute
{
CLMemsetKernel::CLMemsetKernel()
- : ICLKernel(), _tensor(nullptr)
+ : ICLKernel(), _tensor(nullptr), _full_window()
{
}
void CLMemsetKernel::configure(ICLTensor *tensor,
- const PixelValue &constant_value)
+ const PixelValue &constant_value,
+ Window *window)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+ ARM_COMPUTE_ERROR_THROW_ON(validate(tensor->info(), constant_value, window));
+
_tensor = tensor;
- const DataType data_type = tensor->info()->data_type();
- const int vec_size_x = 16 / tensor->info()->element_size();
- const int output_width_x = tensor->info()->tensor_shape().x();
- const bool multi_access_x = (output_width_x / vec_size_x > 0);
+ const DataType data_type = tensor->info()->data_type();
+ const int vec_size_x = 16 / tensor->info()->element_size();
// Create and update the window (if needed)
- Window win = calculate_max_window(*tensor->info());
+ _full_window = calculate_max_window(*tensor->info());
+ Window win = _full_window;
+ if(window != nullptr)
+ {
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(win, *window);
+ win = *window;
+ }
+
+ const int output_width_x = win.num_iterations(0);
+ const bool multi_access_x = output_width_x >= vec_size_x;
+ const bool remainder_x = output_width_x % vec_size_x > 0;
+
if(multi_access_x)
{
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+ win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
}
ICLKernel::configure_internal(win);
@@ -64,14 +75,18 @@ void CLMemsetKernel::configure(ICLTensor *tensor,
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(constant_value, data_type));
build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
+ build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("memset", build_opts.options()));
}
-Status CLMemsetKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value)
+Status CLMemsetKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *window)
{
ARM_COMPUTE_UNUSED(tensor);
ARM_COMPUTE_UNUSED(constant_value);
+ if(window != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(window->x().step() != 1);
+ }
return Status{};
}
@@ -81,15 +96,15 @@ void CLMemsetKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
// Collapse all the batches on the third
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimY);
- Window slice = collapsed.first_slice_window_2D();
+ Window collapsed = window.collapse_if_possible(_full_window, Window::DimZ);
+ Window slice = collapsed.first_slice_window_3D();
do
{
unsigned int idx = 0;
- add_2D_tensor_argument(idx, _tensor, slice);
+ add_3D_tensor_argument(idx, _tensor, slice);
enqueue(queue, *this, slice);
}
- while(collapsed.slide_window_slice_2D(slice));
+ while(collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLCropResize.cpp b/src/runtime/CL/functions/CLCropResize.cpp
new file mode 100644
index 0000000000..2cacef1bb1
--- /dev/null
+++ b/src/runtime/CL/functions/CLCropResize.cpp
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/CL/CLHelpers.h"
+
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CL/functions/CLCropResize.h"
+
+#include <cstddef>
+
+namespace arm_compute
+{
+namespace
+{
+inline void configure_crop(const ICLTensor *input, ICLTensor *crop_boxes, ICLTensor *box_ind, ICLTensor *output, uint32_t crop_box_ind, Coordinates &start, Coordinates &end, uint32_t &batch_index)
+{
+ batch_index = *(reinterpret_cast<int32_t *>(box_ind->ptr_to_element(Coordinates(crop_box_ind))));
+
+ // crop_box_ind is used to index crop_boxes and retrieve the appropriate crop box.
+ // The crop box is specified by normalized coordinates [y0, x0, y1, x1].
+ const float x0 = *reinterpret_cast<const float *>(crop_boxes->ptr_to_element(Coordinates(1, crop_box_ind)));
+ const float y0 = *reinterpret_cast<const float *>(crop_boxes->ptr_to_element(Coordinates(0, crop_box_ind)));
+ const float x1 = *reinterpret_cast<const float *>(crop_boxes->ptr_to_element(Coordinates(3, crop_box_ind)));
+ const float y1 = *reinterpret_cast<const float *>(crop_boxes->ptr_to_element(Coordinates(2, crop_box_ind)));
+ // The normalized coordinates are scaled to retrieve the floating point image coordinates which are rounded to integers.
+ start = Coordinates(std::floor(x0 * (input->info()->tensor_shape()[1] - 1) + 0.5f),
+ std::floor(y0 * (input->info()->tensor_shape()[2] - 1) + 0.5f));
+ end = Coordinates(std::floor(x1 * (input->info()->tensor_shape()[1] - 1) + 0.5f),
+ std::floor(y1 * (input->info()->tensor_shape()[2] - 1) + 0.5f));
+ const TensorShape out_shape(input->info()->tensor_shape()[0], abs(end[0] - start[0]) + 1, abs(end[1] - start[1]) + 1);
+ output->info()->set_tensor_shape(out_shape);
+}
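As a worked example of the rounding above (sizes assumed for illustration): for an input 100 pixels wide, tensor_shape()[1] - 1 is 99, so a box with x0 = 0.2 and x1 = 0.6 gives start.x = floor(0.2 * 99 + 0.5) = 20 and end.x = floor(0.6 * 99 + 0.5) = 59, i.e. an output width of |59 - 20| + 1 = 40 columns, since both ends are inclusive.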
+
+inline void run_crop(const ICLTensor *input, ICLTensor *output, uint32_t batch_index, Coordinates start, Coordinates end, float extrapolation_value)
+{
+ bool is_width_flipped = end[0] < start[0];
+ bool is_height_flipped = end[1] < start[1];
+ /** The number of rows out of bounds at the start and end of output. */
+ int32_t rows_out_of_bounds[2];
+ /** The number of columns out of bounds at the start and end of output. */
+ int32_t cols_out_of_bounds[2];
+ if(is_height_flipped)
+ {
+ rows_out_of_bounds[0] = start[1] >= static_cast<int32_t>(input->info()->dimension(2)) ? std::min(start[1] - input->info()->dimension(2) + 1, output->info()->dimension(2)) : 0;
+ rows_out_of_bounds[1] = end[1] < 0 ? std::min(-end[1], static_cast<int32_t>(output->info()->dimension(2))) : 0;
+ }
+ else
+ {
+ rows_out_of_bounds[0] = start[1] < 0 ? std::min(-start[1], static_cast<int32_t>(output->info()->dimension(2))) : 0;
+ rows_out_of_bounds[1] = end[1] >= static_cast<int32_t>(input->info()->dimension(2)) ? std::min(end[1] - input->info()->dimension(2) + 1, output->info()->dimension(2)) : 0;
+ }
+ if(is_width_flipped)
+ {
+ cols_out_of_bounds[0] = start[0] >= static_cast<int32_t>(input->info()->dimension(1)) ? std::min(start[0] - input->info()->dimension(1) + 1, output->info()->dimension(1)) : 0;
+ cols_out_of_bounds[1] = end[0] < 0 ? std::min(-end[0], static_cast<int32_t>(output->info()->dimension(1))) : 0;
+ }
+ else
+ {
+ cols_out_of_bounds[0] = start[0] < 0 ? std::min(-start[0], static_cast<int32_t>(output->info()->dimension(1))) : 0;
+ cols_out_of_bounds[1] = end[0] >= static_cast<int32_t>(input->info()->dimension(1)) ? std::min(end[0] - input->info()->dimension(1) + 1, output->info()->dimension(1)) : 0;
+ }
+
+ Window full_window = calculate_max_window(*output->info());
+
+ // Full output window:
+ // --------------------------------
+ // | Out of bounds |
+ // | rows before |
+ // |------------------------------|
+ // | Out of | In | Out of |
+ // | bounds | bounds | bounds |
+ // | cols | elements | cols |
+ // | before | copied | after |
+ // | | from input | |
+ // |------------------------------|
+ // | Out of bounds |
+ // | rows after |
+ // |------------------------------|
+ // Use a separate output window for each section of the full output window.
+ // Fill all output rows that have no elements that are within the input bounds
+ // with the extrapolation value using memset.
+ // First for the rows before the in bounds rows.
+ if(rows_out_of_bounds[0] > 0)
+ {
+ Window slice_fill_rows_before(full_window);
+ slice_fill_rows_before.set(2, Window::Dimension(0, rows_out_of_bounds[0], 1));
+ auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ kernel->configure(output, extrapolation_value, &slice_fill_rows_before);
+ CLScheduler::get().enqueue(*kernel);
+ }
+
+ Window slice_in(full_window);
+ slice_in.set(2, Window::Dimension(rows_out_of_bounds[0], output->info()->dimension(2) - rows_out_of_bounds[1], 1));
+ slice_in.set(1, Window::Dimension(cols_out_of_bounds[0], output->info()->dimension(1) - cols_out_of_bounds[1], 1));
+
+ int rows_in_bounds = static_cast<int32_t>(output->info()->dimension(2)) - rows_out_of_bounds[0] - rows_out_of_bounds[1];
+ if(rows_in_bounds > 0)
+ {
+ // Fill all elements that share a row with an in bounds element with the extrapolation value.
+ if(cols_out_of_bounds[0] > 0)
+ {
+ Window slice_fill_cols_before(slice_in);
+ slice_fill_cols_before.set(1, Window::Dimension(0, cols_out_of_bounds[0], 1));
+ auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ kernel->configure(output, extrapolation_value, &slice_fill_cols_before);
+ CLScheduler::get().enqueue(*kernel);
+ }
+
+ if(cols_out_of_bounds[1] > 0)
+ {
+ Window slice_fill_cols_after(slice_in);
+ slice_fill_cols_after.set(1, Window::Dimension(output->info()->dimension(1) - cols_out_of_bounds[1], output->info()->dimension(1), 1));
+ auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ kernel->configure(output, extrapolation_value, &slice_fill_cols_after);
+ CLScheduler::get().enqueue(*kernel);
+ }
+
+ // Copy all elements within the input bounds from the input tensor.
+ int cols_in_bounds = static_cast<int32_t>(output->info()->dimension(1)) - cols_out_of_bounds[0] - cols_out_of_bounds[1];
+ if(cols_in_bounds > 0)
+ {
+ Coordinates2D start_in{ is_width_flipped ? start[0] - cols_out_of_bounds[0] : start[0] + cols_out_of_bounds[0],
+ is_height_flipped ? start[1] - rows_out_of_bounds[0] : start[1] + rows_out_of_bounds[0] };
+ Coordinates2D end_in{ is_width_flipped ? start_in.x - cols_in_bounds + 1 : start_in.x + cols_in_bounds - 1,
+ is_height_flipped ? start_in.y - rows_in_bounds + 1 : start_in.y + rows_in_bounds - 1 };
+ auto kernel = arm_compute::support::cpp14::make_unique<CLCropKernel>();
+
+ kernel->configure(input, output, start_in, end_in, batch_index, extrapolation_value, &slice_in);
+ CLScheduler::get().enqueue(*kernel);
+ }
+ }
+
+ // Fill all rows after the in bounds elements with the extrapolation value.
+ if(rows_out_of_bounds[1] > 0)
+ {
+ Window slice_fill_rows_after(full_window);
+ slice_fill_rows_after.set(2, Window::Dimension(output->info()->dimension(2) - rows_out_of_bounds[1], output->info()->dimension(2), 1));
+ auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ kernel->configure(output, extrapolation_value, &slice_fill_rows_after);
+ CLScheduler::get().enqueue(*kernel);
+ }
+}
+} // namespace
+
+CLCropResize::CLCropResize()
+ : _input(nullptr), _boxes(nullptr), _box_ind(nullptr), _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _scale(), _copy()
+{
+}
+
+Status CLCropResize::validate(const ITensorInfo *input, ITensorInfo *boxes, ITensorInfo *box_ind, const ITensorInfo *output,
+ Coordinates2D crop_size, InterpolationPolicy method, float extrapolation_value)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON(crop_size.x <= 0 || crop_size.y <= 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(method == InterpolationPolicy::AREA);
+ ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[0] != 4);
+ ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[1] != box_ind->tensor_shape()[0]);
+ TensorInfo temp_info;
+ ARM_COMPUTE_RETURN_ON_ERROR(CLCropKernel::validate(input->clone().get(), &temp_info, { 0, 0 }, { 1, 1 }, input->dimension(3) - 1, extrapolation_value));
+ if(output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ TensorShape out_shape(input->tensor_shape()[0], crop_size.x, crop_size.y, boxes->tensor_shape()[1]);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), out_shape);
+ }
+ return Status{};
+}
+
+void CLCropResize::configure(const ICLTensor *input, ICLTensor *boxes, ICLTensor *box_ind, ICLTensor *output, Coordinates2D crop_size,
+ InterpolationPolicy method, float extrapolation_value)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(CLCropResize::validate(input->info(), boxes->info(), box_ind->info(), output->info(), crop_size, method, extrapolation_value));
+
+ _num_boxes = boxes->info()->tensor_shape()[1];
+ TensorShape out_shape(input->info()->tensor_shape()[0], crop_size.x, crop_size.y);
+
+ _input = input;
+ _boxes = boxes;
+ _box_ind = box_ind;
+ _output = output;
+ _method = method;
+ _extrapolation_value = extrapolation_value;
+
+ // For each crop box:
+ // - The initial cropped image is produced as specified by boxes[i] from the 3D image input[box_ind[i]].
+ // Possibly using a CLCropKernel and up to four CLMemsetKernels.
+ // - A tensor is required to hold this initial cropped image.
+ // - A scale function is used to resize the cropped image to the size specified by crop_size.
+ // - A tensor is required to hold the final scaled image before it is copied into the 4D output
+ // that will hold all final cropped and scaled 3D images using CLCopyKernel.
+ _crop_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_boxes);
+ _scale = arm_compute::support::cpp14::make_unique<CLScale[]>(_num_boxes);
+ _scaled_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_boxes);
+ _copy = arm_compute::support::cpp14::make_unique<CLCopyKernel[]>(_num_boxes);
+
+ for(unsigned int i = 0; i < _num_boxes; ++i)
+ {
+ TensorInfo crop_result_info(1, DataType::F32);
+ crop_result_info.set_data_layout(DataLayout::NHWC);
+ _crop_results[i].allocator()->init(crop_result_info);
+
+ TensorInfo scaled_result_info(out_shape, 1, DataType::F32);
+ scaled_result_info.set_data_layout(DataLayout::NHWC);
+ _scaled_results[i].allocator()->init(scaled_result_info);
+ }
+}
+
+void CLCropResize::run()
+{
+ ARM_COMPUTE_ERROR_ON_MSG(_output == nullptr, "Unconfigured function");
+ // The contents of _boxes and _box_ind are required to calculate the shape
+ // of the initial cropped image and thus are required to configure the
+ // kernels used for cropping and scaling.
+ _boxes->map(CLScheduler::get().queue());
+ _box_ind->map(CLScheduler::get().queue());
+ for(unsigned int i = 0; i < _num_boxes; ++i)
+ {
+ // The size of the crop box in _boxes, and thus the shape of _crop_results[i],
+ // may not be known until run time, so the kernels cannot be configured until then.
+ uint32_t batch_index;
+ Coordinates start, end;
+ configure_crop(_input, _boxes, _box_ind, &_crop_results[i], i, start, end, batch_index);
+ _scale[i].configure(&_crop_results[i], &_scaled_results[i], _method, BorderMode::CONSTANT, PixelValue(_extrapolation_value), SamplingPolicy::TOP_LEFT);
+
+ Window win = calculate_max_window(*_output->info());
+ win.set(3, Window::Dimension(i, i + 1, 1));
+ _copy[i].configure(&_scaled_results[i], _output, PaddingList(), &win);
+
+ _crop_results[i].allocator()->allocate();
+ _scaled_results[i].allocator()->allocate();
+
+ run_crop(_input, &_crop_results[i], batch_index, start, end, _extrapolation_value);
+ }
+ _boxes->unmap(CLScheduler::get().queue());
+ _box_ind->unmap(CLScheduler::get().queue());
+ CLScheduler::get().sync();
+ for(unsigned int i = 0; i < _num_boxes; ++i)
+ {
+ // Scale the cropped image
+ _scale[i].run();
+ }
+ CLScheduler::get().sync();
+ for(unsigned int i = 0; i < _num_boxes; ++i)
+ {
+ // Copy scaled image into output.
+ CLScheduler::get().enqueue(_copy[i]);
+ }
+ CLScheduler::get().sync();
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/CL/CropResize.cpp b/tests/validation/CL/CropResize.cpp
new file mode 100644
index 0000000000..cacf405c96
--- /dev/null
+++ b/tests/validation/CL/CropResize.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLCropResize.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/CropResizeDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/CropResizeFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(CropResize)
+
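+/** Tolerance used when comparing the target output against the reference implementation. */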
+RelativeTolerance<float> tolerance_fp32(0.001f);
+
+template <typename T>
+using CLCropResizeFixture = CropResizeFixture<CLTensor, CLAccessor, CLCropResize, T>;
+
+// *INDENT-OFF*
+// clang-format off
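+// Each zipped row below pairs an (InputInfo, BoxesInfo, BoxIndInfo, OutputInfo) combination
+// with the result that CLCropResize::validate() is expected to return; the inline comments
+// on the invalid rows name the condition being exercised.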
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::S32),
+ TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::U8), // Invalid input data type.
+ TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::S32), // Invalid box_ind shape.
+ TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::S32), // Invalid output shape.
+ TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::S32), // Invalid output data type.
+ TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::S32), // Invalid output shape.
+ TensorInfo(TensorShape(15U, 30U, 40U, 10U), 1, DataType::S32), // Invalid boxes shape.
+ }),
+ framework::dataset::make("BoxesInfo",{ TensorInfo(TensorShape(4, 20), 1, DataType::F32),
+ TensorInfo(TensorShape(4, 20), 1, DataType::F32),
+ TensorInfo(TensorShape(4, 20), 1, DataType::F32),
+ TensorInfo(TensorShape(4, 20), 1, DataType::F32),
+ TensorInfo(TensorShape(4, 20), 1, DataType::F32),
+ TensorInfo(TensorShape(4, 20), 1, DataType::F32),
+ TensorInfo(TensorShape(3, 20), 1, DataType::F32),
+ })),
+ framework::dataset::make("BoxIndInfo",{ TensorInfo(TensorShape(20), 1, DataType::S32),
+ TensorInfo(TensorShape(20), 1, DataType::S32),
+ TensorInfo(TensorShape(10), 1, DataType::S32),
+ TensorInfo(TensorShape(20), 1, DataType::S32),
+ TensorInfo(TensorShape(20), 1, DataType::S32),
+ TensorInfo(TensorShape(20), 1, DataType::S32),
+ TensorInfo(TensorShape(20), 1, DataType::S32),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(15U, 5, 5, 20U), 1, DataType::F32),
+ TensorInfo(TensorShape(15U, 5, 5, 20U), 1, DataType::F32),
+ TensorInfo(TensorShape(15U, 5, 5, 20U), 1, DataType::F32),
+ TensorInfo(TensorShape(15U, 5, 5, 10U), 1, DataType::F32),
+ TensorInfo(TensorShape(15U, 5, 5, 20U), 1, DataType::S32),
+ TensorInfo(TensorShape(5U, 5, 5, 20U), 1, DataType::F32),
+ TensorInfo(TensorShape(15U, 5, 5, 20U), 1, DataType::F32),
+ })),
+ framework::dataset::make("Expected", { true, false, false, false, false, false, false})),
+ input, boxes, box_ind, output, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(CLCropResize::validate(&input.clone()->set_data_layout(DataLayout::NHWC).set_is_resizable(false),
+ &boxes.clone()->set_is_resizable(false),
+ &box_ind.clone()->set_is_resizable(false),
+ &output.clone()->set_data_layout(DataLayout::NHWC).set_is_resizable(false),
+ Coordinates2D{ 5, 5 }, InterpolationPolicy::BILINEAR, 100)) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
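+// The fixture tests below run the small crop-and-resize dataset, with and without boxes
+// that extend outside the input, for each input data type exercised here, and compare the
+// result against the reference implementation using tolerance_fp32.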
+TEST_SUITE(Float)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(F16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLCropResizeFixture<half>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallCropResizeDataset(),
+ combine(framework::dataset::make("IsOutOfBounds", { true, false }),
+ framework::dataset::make("DataType", DataType::F16))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // F16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE(F32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLCropResizeFixture<float>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallCropResizeDataset(),
+ combine(framework::dataset::make("IsOutOfBounds", { true, false }),
+ framework::dataset::make("DataType", DataType::F32))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // F32
+TEST_SUITE_END() // Float
+
+TEST_SUITE(U16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLCropResizeFixture<uint16_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallCropResizeDataset(),
+ combine(framework::dataset::make("IsOutOfBounds", { true, false }),
+ framework::dataset::make("DataType", DataType::U16))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // U16
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLCropResizeFixture<int16_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallCropResizeDataset(),
+ combine(framework::dataset::make("IsOutOfBounds", { true, false }),
+ framework::dataset::make("DataType", DataType::S16))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(U32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLCropResizeFixture<uint32_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallCropResizeDataset(),
+ combine(framework::dataset::make("IsOutOfBounds", { true, false }),
+ framework::dataset::make("DataType", DataType::U32))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // U32
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLCropResizeFixture<int32_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallCropResizeDataset(),
+ combine(framework::dataset::make("IsOutOfBounds", { true, false }),
+ framework::dataset::make("DataType", DataType::S32))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE_END() // CropResize
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute