aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arm_compute/core/NEON/NEKernels.h1
-rw-r--r--arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h121
-rw-r--r--arm_compute/runtime/NEON/NEFunctions.h1
-rw-r--r--arm_compute/runtime/NEON/functions/NEUpsampleLayer.h73
-rw-r--r--src/core/NEON/kernels/NEUpsampleLayerKernel.cpp376
-rw-r--r--src/graph/backends/NEON/NEFunctionFactory.cpp2
-rw-r--r--src/graph/backends/NEON/NENodeValidator.cpp2
-rw-r--r--src/runtime/NEON/functions/NEUpsampleLayer.cpp52
-rw-r--r--tests/validation/NEON/Upsample.cpp156
-rw-r--r--tests/validation/fixtures/UpsampleLayerFixture.h14
-rw-r--r--tests/validation/reference/UpsampleLayer.cpp12
-rw-r--r--tests/validation/reference/UpsampleLayer.h2
12 files changed, 798 insertions, 14 deletions
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 2b76c0bd29..cb2e851b66 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -113,6 +113,7 @@
#include "arm_compute/core/NEON/kernels/NETableLookupKernel.h"
#include "arm_compute/core/NEON/kernels/NEThresholdKernel.h"
#include "arm_compute/core/NEON/kernels/NETransposeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEWarpKernel.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h b/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h
new file mode 100644
index 0000000000..cf21e6cf73
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEUPSAMPLELAYERKERNEL_H__
+#define __ARM_COMPUTE_NEUPSAMPLELAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Interface for the Upsample layer kernel.*/
+class NEUpsampleLayerKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NEUpsampleLayerKernel";
+ }
+ /** Default constructor */
+ NEUpsampleLayerKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEUpsampleLayerKernel(const NEUpsampleLayerKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEUpsampleLayerKernel &operator=(const NEUpsampleLayerKernel &) = delete;
+ /** Default Move Constructor. */
+ NEUpsampleLayerKernel(NEUpsampleLayerKernel &&) = default;
+ /** Default move assignment operator */
+ NEUpsampleLayerKernel &operator=(NEUpsampleLayerKernel &&) = default;
+ /** Default destructor */
+ ~NEUpsampleLayerKernel() = default;
+ /** Set the input output tensors.
+ *
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] info Contains stride information described in @ref Size2D.
+ * @param[in] policy Defines the policy to fill the intermediate pixels.
+ *
+ */
+ void configure(const ITensor *input, ITensor *output, const Size2D &info, const InterpolationPolicy policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEUpsampleLayerKernel
+ *
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] output Destination tensor info. Data types supported: same as @p input.
+ * @param[in] info Contains stride information described in @ref Size2D.
+ * @param[in] policy Defines the policy to fill the intermediate pixels.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &info, const InterpolationPolicy policy);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ /** Function to run upsample layer for FP32 (NCHW)
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ void upsample_f32_nchw(const Window &window);
+ /** Function to run upsample layer for FP32 (NHWC)
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ void upsample_f32_nhwc(const Window &window);
+ /** Function to run upsample layer for FP16 (NCHW)
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ void upsample_f16_nchw(const Window &window);
+ /** Function to run upsample layer for FP16 (NHWC)
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ void upsample_f16_nhwc(const Window &window);
+ /** Function to run upsample layer for QASYMM8 (NCHW)
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ void upsample_qasymm8_nchw(const Window &window);
+ /** Function to run upsample layer for QASYMM8 (NHWC)
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ void upsample_qasymm8_nhwc(const Window &window);
+ /** Common signature for all the upsample layer functions
+ *
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using UpsampleFunctionPtr = void (NEUpsampleLayerKernel::*)(const Window &window);
+
+private:
+ UpsampleFunctionPtr _func;
+ const ITensor *_input;
+ ITensor *_output;
+ Size2D _info;
+ unsigned int _num_elems_processed_per_iteration_x;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEUPSAMPLELAYERKERNEL_H__ */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index d9b7269efc..2bf8bcd515 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -115,6 +115,7 @@
#include "arm_compute/runtime/NEON/functions/NETableLookup.h"
#include "arm_compute/runtime/NEON/functions/NEThreshold.h"
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
+#include "arm_compute/runtime/NEON/functions/NEUpsampleLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWarpAffine.h"
#include "arm_compute/runtime/NEON/functions/NEWarpPerspective.h"
#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEUpsampleLayer.h b/arm_compute/runtime/NEON/functions/NEUpsampleLayer.h
new file mode 100644
index 0000000000..b88e71c0d7
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEUpsampleLayer.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEUPSAMPLELAYER_H__
+#define __ARM_COMPUTE_NEUPSAMPLELAYER_H__
+
+#include "arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/Tensor.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Function to run upsample layer */
+class NEUpsampleLayer : public IFunction
+{
+public:
+ /** Constructor */
+ NEUpsampleLayer();
+ /** Set the input output tensors.
+ *
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] info Contains stride information described in @ref Size2D.
+ * @param[in] policy Defines the policy to fill the intermediate pixels.
+ *
+ */
+ void configure(const ITensor *input, ITensor *output, const Size2D &info,
+ const InterpolationPolicy &policy);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEUpsampleLayer
+ *
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in]  output Destination tensor info. Data types supported: same as @p input.
+ * @param[in] info Contains stride information described in @ref Size2D.
+ * @param[in] policy Defines the policy to fill the intermediate pixels.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &info,
+ const InterpolationPolicy &policy);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ NEUpsampleLayerKernel _kernel;
+ DataLayout _data_layout;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEUPSAMPLELAYER_H__ */
diff --git a/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp b/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp
new file mode 100644
index 0000000000..5dca58edd8
--- /dev/null
+++ b/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace
+{
+std::pair<Status, Window> validate_and_configure_window_nchw(ITensorInfo *input, ITensorInfo *output, int num_elems_processed_per_iteration_x, const Size2D &info)
+{
+ const int num_elems_processed_per_iteration_x_out = num_elems_processed_per_iteration_x * info.x();
+ Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x_out));
+ AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x, 1, 0.5f, 0.5f);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_x_out);
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, output->valid_region());
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+
+std::pair<Status, Window> validate_and_configure_window_nhwc(ITensorInfo *input, ITensorInfo *output, int num_elems_processed_per_iteration_x, const Size2D &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x));
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration_x);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_x);
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, output->valid_region());
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, int num_elems_processed_per_iteration_x, const Size2D &info)
+{
+ std::pair<Status, Window> win_config;
+ switch(input->data_layout())
+ {
+ case DataLayout::NCHW:
+ win_config = validate_and_configure_window_nchw(input, output, num_elems_processed_per_iteration_x, info);
+ break;
+ case DataLayout::NHWC:
+ win_config = validate_and_configure_window_nhwc(input, output, num_elems_processed_per_iteration_x, info);
+ break;
+ default:
+ win_config = std::make_pair(ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported data layout!"), Window{});
+ }
+
+ return win_config;
+}
+} // namespace
+NEUpsampleLayerKernel::NEUpsampleLayerKernel()
+ : _func(nullptr), _input(nullptr), _output(nullptr), _info(), _num_elems_processed_per_iteration_x()
+{
+}
+
+Status NEUpsampleLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &info, const InterpolationPolicy policy)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_UNUSED(policy);
+
+ const DataLayout data_layout = input->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.x() != 2 || info.y() != 2, "Only stride 2 is supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(policy != InterpolationPolicy::NEAREST_NEIGHBOR, "Only nearest neighbor policy supported");
+
+ // Check output if configured
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_width) != info.x() * input->dimension(idx_width));
+ ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_height) != info.y() * input->dimension(idx_height));
+ }
+
+ const int num_elems_processed_per_iteration_x = 16 / input->element_size();
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
+ output->clone().get(), num_elems_processed_per_iteration_x, info)
+ .first);
+ return Status{};
+}
+
+void NEUpsampleLayerKernel::upsample_f32_nchw(const arm_compute::Window &window)
+{
+ Window window_in(window);
+ window_in.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _num_elems_processed_per_iteration_x));
+
+ Window window_out(window);
+ window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.y()));
+
+ Iterator input(_input, window_in);
+ Iterator output(_output, window_out);
+ const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float);
+
+ execute_window_loop(window_out, [&](const Coordinates & id)
+ {
+ const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr()));
+ const float32x4_t data_out1 = { vgetq_lane_f32(data, 0), vgetq_lane_f32(data, 0), vgetq_lane_f32(data, 1), vgetq_lane_f32(data, 1) };
+ const float32x4_t data_out2 = { vgetq_lane_f32(data, 2), vgetq_lane_f32(data, 2), vgetq_lane_f32(data, 3), vgetq_lane_f32(data, 3) };
+ auto out = reinterpret_cast<float *>(output.ptr());
+
+ vst1q_f32(out, data_out1);
+ vst1q_f32(out + 4, data_out2);
+ vst1q_f32(out + offset_y_out, data_out1);
+ vst1q_f32(out + offset_y_out + 4, data_out2);
+ },
+ input, output);
+}
+
+void NEUpsampleLayerKernel::upsample_f32_nhwc(const arm_compute::Window &window)
+{
+ Window window_out(window);
+ window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.x()));
+ window_out.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(2), _info.y()));
+
+ Iterator input(_input, window);
+ Iterator output(_output, window_out);
+
+ const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float);
+ const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(float);
+
+ execute_window_loop(window_out, [&](const Coordinates & id)
+ {
+ const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr()));
+ auto out = reinterpret_cast<float *>(output.ptr());
+
+ vst1q_f32(out, data);
+ vst1q_f32(out + offset_y_out, data);
+ vst1q_f32(out + offset_z_out, data);
+ vst1q_f32(out + offset_y_out + offset_z_out, data);
+ },
+ input, output);
+}
+
+void NEUpsampleLayerKernel::upsample_qasymm8_nchw(const arm_compute::Window &window)
+{
+ Window window_in(window);
+ window_in.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _num_elems_processed_per_iteration_x));
+
+ Window window_out(window);
+ window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.y()));
+
+ Iterator input(_input, window_in);
+ Iterator output(_output, window_out);
+ const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(uint8_t);
+
+ execute_window_loop(window_out, [&](const Coordinates & id)
+ {
+ const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr()));
+ const uint8x16_t data_out1 = { vgetq_lane_u8(data, 0), vgetq_lane_u8(data, 0), vgetq_lane_u8(data, 1), vgetq_lane_u8(data, 1),
+ vgetq_lane_u8(data, 2), vgetq_lane_u8(data, 2), vgetq_lane_u8(data, 3), vgetq_lane_u8(data, 3),
+ vgetq_lane_u8(data, 4), vgetq_lane_u8(data, 4), vgetq_lane_u8(data, 5), vgetq_lane_u8(data, 5),
+ vgetq_lane_u8(data, 6), vgetq_lane_u8(data, 6), vgetq_lane_u8(data, 7), vgetq_lane_u8(data, 7)
+ };
+ const uint8x16_t data_out2 =
+ {
+ vgetq_lane_u8(data, 8), vgetq_lane_u8(data, 8), vgetq_lane_u8(data, 9), vgetq_lane_u8(data, 9),
+ vgetq_lane_u8(data, 10), vgetq_lane_u8(data, 10), vgetq_lane_u8(data, 11), vgetq_lane_u8(data, 11),
+ vgetq_lane_u8(data, 12), vgetq_lane_u8(data, 12), vgetq_lane_u8(data, 13), vgetq_lane_u8(data, 13),
+ vgetq_lane_u8(data, 14), vgetq_lane_u8(data, 14), vgetq_lane_u8(data, 15), vgetq_lane_u8(data, 15)
+ };
+ auto out = reinterpret_cast<uint8_t *>(output.ptr());
+
+ vst1q_u8(out, data_out1);
+ vst1q_u8(out + 16, data_out2);
+ vst1q_u8(out + offset_y_out, data_out1);
+ vst1q_u8(out + offset_y_out + 16, data_out2);
+ },
+ input, output);
+}
+
+void NEUpsampleLayerKernel::upsample_qasymm8_nhwc(const arm_compute::Window &window)
+{
+ Window window_out(window);
+ window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.x()));
+ window_out.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(2), _info.y()));
+
+ Iterator input(_input, window);
+ Iterator output(_output, window_out);
+
+ const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(uint8_t);
+ const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(uint8_t);
+ execute_window_loop(window_out, [&](const Coordinates & id)
+ {
+ const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr()));
+ auto out = reinterpret_cast<uint8_t *>(output.ptr());
+
+ vst1q_u8(out, data);
+ vst1q_u8(out + offset_y_out, data);
+ vst1q_u8(out + offset_z_out, data);
+ vst1q_u8(out + offset_y_out + offset_z_out, data);
+ },
+ input, output);
+}
+
+void NEUpsampleLayerKernel::upsample_f16_nchw(const arm_compute::Window &window)
+{
+ ARM_COMPUTE_UNUSED(window);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ Window window_in(window);
+ window_in.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _num_elems_processed_per_iteration_x));
+
+ Window window_out(window);
+ window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.y()));
+
+ Iterator input(_input, window_in);
+ Iterator output(_output, window_out);
+ const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float16_t);
+
+ execute_window_loop(window_out, [&](const Coordinates & id)
+ {
+ const float16x8_t data = vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr()));
+ const float16x8_t data_out1 = { vgetq_lane_f16(data, 0), vgetq_lane_f16(data, 0), vgetq_lane_f16(data, 1), vgetq_lane_f16(data, 1),
+ vgetq_lane_f16(data, 2), vgetq_lane_f16(data, 2), vgetq_lane_f16(data, 3), vgetq_lane_f16(data, 3)
+ };
+ const float16x8_t data_out2 = { vgetq_lane_f16(data, 4), vgetq_lane_f16(data, 4), vgetq_lane_f16(data, 5), vgetq_lane_f16(data, 5),
+ vgetq_lane_f16(data, 6), vgetq_lane_f16(data, 6), vgetq_lane_f16(data, 7), vgetq_lane_f16(data, 7)
+ };
+ auto out = reinterpret_cast<float16_t *>(output.ptr());
+
+ vst1q_f16(out, data_out1);
+ vst1q_f16(out + 8, data_out2);
+ vst1q_f16(out + offset_y_out, data_out1);
+ vst1q_f16(out + offset_y_out + 8, data_out2);
+ },
+ input, output);
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+}
+
+void NEUpsampleLayerKernel::upsample_f16_nhwc(const arm_compute::Window &window)
+{
+ ARM_COMPUTE_UNUSED(window);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ Window window_out(window);
+ window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.x()));
+ window_out.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(2), _info.y()));
+
+ Iterator input(_input, window);
+ Iterator output(_output, window_out);
+ const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float16_t);
+ const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(float16_t);
+
+ execute_window_loop(window_out, [&](const Coordinates & id)
+ {
+ const float16x8_t data = vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr()));
+ auto out = reinterpret_cast<float16_t *>(output.ptr());
+
+ vst1q_f16(out, data);
+ vst1q_f16(out + offset_y_out, data);
+ vst1q_f16(out + offset_z_out, data);
+ vst1q_f16(out + offset_y_out + offset_z_out, data);
+ },
+ input, output);
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+}
+
+void NEUpsampleLayerKernel::configure(const ITensor *input, ITensor *output, const Size2D &info, const InterpolationPolicy policy)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_UNUSED(policy);
+
+ _input = input;
+ _output = output;
+ _info = info;
+
+ const DataLayout data_layout = input->info()->data_layout();
+
+ TensorShape output_shape = misc::shape_calculator::compute_upsample_shape(*input->info(), info);
+ auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+ output->info()->set_data_layout(data_layout);
+
+ // Perform validation step
+ ARM_COMPUTE_ERROR_THROW_ON(NEUpsampleLayerKernel::validate(input->info(), output->info(), info, policy));
+
+ _num_elems_processed_per_iteration_x = 16 / output->info()->element_size();
+
+ switch(data_layout)
+ {
+ case DataLayout::NCHW:
+ {
+ switch(input->info()->data_type())
+ {
+ case DataType::QASYMM8:
+ _func = &NEUpsampleLayerKernel::upsample_qasymm8_nchw;
+ break;
+ case DataType::F32:
+ _func = &NEUpsampleLayerKernel::upsample_f32_nchw;
+ break;
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ _func = &NEUpsampleLayerKernel::upsample_f16_nchw;
+ break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+ break;
+ }
+ case DataLayout::NHWC:
+ {
+ switch(input->info()->data_type())
+ {
+ case DataType::QASYMM8:
+ _func = &NEUpsampleLayerKernel::upsample_qasymm8_nhwc;
+ break;
+ case DataType::F32:
+ _func = &NEUpsampleLayerKernel::upsample_f32_nhwc;
+ break;
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ _func = &NEUpsampleLayerKernel::upsample_f16_nhwc;
+ break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
+ // Configure window
+ std::pair<Status, Window> win_config = validate_and_configure_window(input->info(),
+ output->info(),
+ _num_elems_processed_per_iteration_x,
+ info);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ INEKernel::configure(win_config.second);
+}
+
+void NEUpsampleLayerKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON(_func == nullptr);
+
+ (this->*_func)(window);
+}
+} // namespace arm_compute
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index ec70a5ae93..508e4e8ec4 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -215,6 +215,8 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
return detail::create_resize_layer<NEScale, NETargetInfo>(*polymorphic_downcast<ResizeLayerNode *>(node));
case NodeType::SoftmaxLayer:
return detail::create_softmax_layer<NESoftmaxLayer, NETargetInfo>(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
+ case NodeType::UpsampleLayer:
+ return detail::create_upsample_layer<NEUpsampleLayer, NETargetInfo>(*polymorphic_downcast<UpsampleLayerNode *>(node), ctx);
default:
return nullptr;
}
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 4f35298c7d..fd3e6f5391 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -68,7 +68,7 @@ Status NENodeValidator::validate(INode *node)
case NodeType::SliceLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : SliceLayer");
case NodeType::UpsampleLayer:
- return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : UpsampleLayer");
+ return detail::validate_upsample_layer<NEUpsampleLayer>(*polymorphic_downcast<UpsampleLayerNode *>(node));
case NodeType::YOLOLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : YOLOLayer");
default:
diff --git a/src/runtime/NEON/functions/NEUpsampleLayer.cpp b/src/runtime/NEON/functions/NEUpsampleLayer.cpp
new file mode 100644
index 0000000000..9be96af66a
--- /dev/null
+++ b/src/runtime/NEON/functions/NEUpsampleLayer.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEUpsampleLayer.h"
+
+#include "arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h"
+
+namespace arm_compute
+{
+NEUpsampleLayer::NEUpsampleLayer()
+ : _kernel(), _data_layout()
+{
+}
+
+Status NEUpsampleLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &info,
+ const InterpolationPolicy &policy)
+{
+ return NEUpsampleLayerKernel::validate(input, output, info, policy);
+}
+
+void NEUpsampleLayer::configure(const ITensor *input, ITensor *output, const Size2D &info, const InterpolationPolicy &policy)
+{
+ _data_layout = input->info()->data_layout();
+ _kernel.configure(input, output, info, policy);
+}
+
+void NEUpsampleLayer::run()
+{
+ const auto win = (_data_layout == DataLayout::NCHW) ? Window::DimZ : Window::DimX;
+ NEScheduler::get().schedule(&_kernel, win);
+}
+} // namespace arm_compute
diff --git a/tests/validation/NEON/Upsample.cpp b/tests/validation/NEON/Upsample.cpp
new file mode 100644
index 0000000000..39b69ee1e3
--- /dev/null
+++ b/tests/validation/NEON/Upsample.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEUpsampleLayer.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/UpsampleLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(UpsampleLayer)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32))),
+ input_shape, data_type)
+{
+ InterpolationPolicy policy = InterpolationPolicy::NEAREST_NEIGHBOR;
+ Size2D info = Size2D(2, 2);
+
+ // Create tensors
+ Tensor src = create_tensor<Tensor>(input_shape, data_type, 1);
+ Tensor dst;
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NEUpsampleLayer upsample;
+ upsample.configure(&src, &dst, info, policy);
+
+ // Validate valid region
+ const ValidRegion src_valid_region = shape_to_valid_region(src.info()->tensor_shape());
+ const ValidRegion dst_valid_region = shape_to_valid_region(dst.info()->tensor_shape());
+
+ validate(src.info()->valid_region(), src_valid_region);
+ validate(dst.info()->valid_region(), dst_valid_region);
+}
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 10U, 2U), 1, DataType::F32), // Mismatching data type
+ TensorInfo(TensorShape(10U, 10U, 2U), 1, DataType::F32), // Invalid output shape
+ TensorInfo(TensorShape(10U, 10U, 2U), 1, DataType::F32), // Invalid stride
+ TensorInfo(TensorShape(10U, 10U, 2U), 1, DataType::F32), // Invalid policy
+ TensorInfo(TensorShape(32U, 32U), 1, DataType::F32),
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(20U, 20U, 2U), 1, DataType::F16),
+ TensorInfo(TensorShape(20U, 10U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(20U, 20U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(20U, 20U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(64U, 64U), 1, DataType::F32),
+ })),
+ framework::dataset::make("PadInfo", { Size2D(2, 2),
+ Size2D(2, 2),
+ Size2D(1, 1),
+ Size2D(2, 2),
+ Size2D(2, 2),
+ })),
+ framework::dataset::make("UpsamplingPolicy", { InterpolationPolicy::NEAREST_NEIGHBOR,
+ InterpolationPolicy::NEAREST_NEIGHBOR,
+ InterpolationPolicy::NEAREST_NEIGHBOR,
+ InterpolationPolicy::BILINEAR,
+ InterpolationPolicy::NEAREST_NEIGHBOR,
+ })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
+ input_info, output_info, pad_info, policy, expected)
+{
+ bool is_valid = bool(NEUpsampleLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pad_info, policy));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using NEUpsampleLayerFixture = UpsampleLayerFixture<Tensor, Accessor, NEUpsampleLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEUpsampleLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("PadInfo", { Size2D(2, 2) })),
+ framework::dataset::make("UpsamplingPolicy", { InterpolationPolicy::NEAREST_NEIGHBOR })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEUpsampleLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("PadInfo", { Size2D(2, 2) })),
+ framework::dataset::make("UpsamplingPolicy", { InterpolationPolicy::NEAREST_NEIGHBOR })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+TEST_SUITE_END() // Float
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEUpsampleLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("PadInfo", { Size2D(2, 2) })),
+ framework::dataset::make("UpsamplingPolicy", { InterpolationPolicy::NEAREST_NEIGHBOR })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE_END() // UpsampleLayer
+TEST_SUITE_END() // NEON
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/UpsampleLayerFixture.h b/tests/validation/fixtures/UpsampleLayerFixture.h
index 8fc3565e18..40229e2866 100644
--- a/tests/validation/fixtures/UpsampleLayerFixture.h
+++ b/tests/validation/fixtures/UpsampleLayerFixture.h
@@ -45,12 +45,12 @@ class UpsampleLayerFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape input_shape, DataType data_type, DataLayout data_layout,
- Size2D info, const InterpolationPolicy &upsampling_policy)
+ Size2D info, const InterpolationPolicy &policy)
{
_data_type = data_type;
- _target = compute_target(input_shape, info, upsampling_policy, data_type, data_layout);
- _reference = compute_reference(input_shape, info, upsampling_policy, data_type);
+ _target = compute_target(input_shape, info, policy, data_type, data_layout);
+ _reference = compute_reference(input_shape, info, policy, data_type);
}
protected:
@@ -61,7 +61,7 @@ protected:
}
TensorType compute_target(TensorShape input_shape,
- const Size2D &info, const InterpolationPolicy &upsampling_policy, DataType data_type, DataLayout data_layout)
+ const Size2D &info, const InterpolationPolicy &policy, DataType data_type, DataLayout data_layout)
{
if(data_layout == DataLayout::NHWC)
{
@@ -74,7 +74,7 @@ protected:
// Create and configure function
FunctionType upsample;
- upsample.configure(&src, &dst, info, upsampling_policy);
+ upsample.configure(&src, &dst, info, policy);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -96,7 +96,7 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape,
- const Size2D &info, const InterpolationPolicy &upsampling_policy, DataType data_type)
+ const Size2D &info, const InterpolationPolicy &policy, DataType data_type)
{
// Create reference
SimpleTensor<T> src{ input_shape, data_type };
@@ -104,7 +104,7 @@ protected:
// Fill reference
fill(src, 0);
- return reference::upsample_layer<T>(src, info, upsampling_policy);
+ return reference::upsample_layer<T>(src, info, policy);
}
TensorType _target{};
diff --git a/tests/validation/reference/UpsampleLayer.cpp b/tests/validation/reference/UpsampleLayer.cpp
index 3a340d0905..876f6d794a 100644
--- a/tests/validation/reference/UpsampleLayer.cpp
+++ b/tests/validation/reference/UpsampleLayer.cpp
@@ -35,10 +35,10 @@ namespace reference
{
template <typename T>
SimpleTensor<T> upsample_layer(const SimpleTensor<T> &src,
- const Size2D &info, const InterpolationPolicy upsampling_policy)
+ const Size2D &info, const InterpolationPolicy policy)
{
- ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
- ARM_COMPUTE_UNUSED(upsampling_policy);
+ ARM_COMPUTE_ERROR_ON(policy != InterpolationPolicy::NEAREST_NEIGHBOR);
+ ARM_COMPUTE_UNUSED(policy);
TensorShape output_shape = src.shape();
output_shape.set(0, src.shape().x() * info.x());
@@ -77,9 +77,11 @@ SimpleTensor<T> upsample_layer(const SimpleTensor<T> &src,
}
template SimpleTensor<float> upsample_layer(const SimpleTensor<float> &src,
- const Size2D &info, const InterpolationPolicy upsampling_policy);
+ const Size2D &info, const InterpolationPolicy policy);
template SimpleTensor<half> upsample_layer(const SimpleTensor<half> &src,
- const Size2D &info, const InterpolationPolicy upsampling_policy);
+ const Size2D &info, const InterpolationPolicy policy);
+template SimpleTensor<uint8_t> upsample_layer(const SimpleTensor<uint8_t> &src,
+ const Size2D &info, const InterpolationPolicy policy);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/UpsampleLayer.h b/tests/validation/reference/UpsampleLayer.h
index fc1da39186..ecb458a0c6 100644
--- a/tests/validation/reference/UpsampleLayer.h
+++ b/tests/validation/reference/UpsampleLayer.h
@@ -37,7 +37,7 @@ namespace reference
{
template <typename T>
SimpleTensor<T> upsample_layer(const SimpleTensor<T> &src,
- const Size2D &info, const InterpolationPolicy upsampling_policy);
+ const Size2D &info, const InterpolationPolicy policy);
} // namespace reference
} // namespace validation
} // namespace test