author    Sanghoon Lee <sanghoon.lee@arm.com>        2018-01-23 15:16:47 +0000
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:49:16 +0000
commit    f47bfb97fa8bc928a7860b84b7b227f716f65e58 (patch)
tree      6623bc798f312e0f1836f5df0fe82d3bde3e2f95
parent    be1f4a7f12e41f4988d4157f35dcb951cf31b72d (diff)
download  ComputeLibrary-f47bfb97fa8bc928a7860b84b7b227f716f65e58.tar.gz
COMPMID-594: Implement reference and CL/NEON validation for LocallyConnected
Change-Id: I01e7abcf3f1b19458128e277044af850ad9fa224
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118610
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
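Note (illustrative only, not part of this patch): the reference implementation added below can also be called directly from test code. A minimal sketch follows, using one of the shapes from SmallLocallyConnectedDataset and assuming the usual test-tree includes; the fixture added in this change does the equivalent internally.

    // Illustrative sketch: direct use of the new F32 reference (not part of the patch).
    // Shapes are taken from SmallLocallyConnectedDataset; include paths are assumed.
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "tests/SimpleTensor.h"
    #include "tests/validation/reference/LocallyConnected.h"

    using namespace arm_compute;
    using namespace arm_compute::test;

    void run_reference_example()
    {
        // Input 23x27x5, per-location 3x3x5 weights for 21 OFMs over 275 (11x25) output positions
        SimpleTensor<float> src(TensorShape(23U, 27U, 5U), DataType::F32);
        SimpleTensor<float> weights(TensorShape(3U, 3U, 5U, 21U, 275U), DataType::F32);
        SimpleTensor<float> bias(TensorShape(21U, 275U), DataType::F32);
        const TensorShape   dst_shape(11U, 25U, 21U);
        const PadStrideInfo info(2, 1, 0, 0); // stride (2,1), no padding

        // ... fill src/weights/bias as done in LocallyConnectedFixture ...

        SimpleTensor<float> dst = validation::reference::locally_connected<float>(src, weights, bias, dst_shape, info);
    }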
-rw-r--r--  src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp      6
-rw-r--r--  src/runtime/CL/functions/CLLocallyConnectedLayer.cpp                  7
-rw-r--r--  tests/datasets/LocallyConnectedDataset.h                             79
-rw-r--r--  tests/validation/CL/LocallyConnected.cpp                             95
-rw-r--r--  tests/validation/NEON/LocallyConnected.cpp                           96
-rw-r--r--  tests/validation/fixtures/LocallyConnectedFixture.h                 133
-rw-r--r--  tests/validation/reference/Convolution3d.h                          223
-rw-r--r--  tests/validation/reference/ConvolutionLayer.cpp                     190
-rw-r--r--  tests/validation/reference/LocallyConnected.cpp                     111
-rw-r--r--  tests/validation/reference/LocallyConnected.h                        44
10 files changed, 795 insertions, 189 deletions
diff --git a/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp
index 58da0402bc..35beb82689 100644
--- a/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -186,7 +186,7 @@ void vector_matrix_multiply_f32(const ITensor *input0, const ITensor *input1, IT
win_out.set(Window::DimX, Window::Dimension(window_start_x, window_end_x, window_step_x));
Window win_a(window);
- win_a.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_a.set(Window::DimX, Window::Dimension(0, 0, 0));
Iterator ina(input0, win_a);
Iterator out(output, win_out);
@@ -234,7 +234,7 @@ void vector_matrix_multiply_f32(const ITensor *input0, const ITensor *input1, IT
asm volatile("PLD [%0, #128*1]" ::"r"(reinterpret_cast<const uint8_t *>(matrix_b + 2 * in_b_stride)));
asm volatile("PLD [%0, #128*1]" ::"r"(reinterpret_cast<const uint8_t *>(matrix_b + 3 * in_b_stride)));
asm volatile("PLD [%0, #128*1]" ::"r"(reinterpret_cast<const uint8_t *>(matrix_b + 4 * in_b_stride)));
-#endif /* __arm __ */
+#endif /* __arm__ */
acc0 = vmlaq_lane_f32(acc0, b00, a0l, 0);
acc1 = vmlaq_lane_f32(acc1, b01, a0l, 0);
diff --git a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
index 9120aadf17..d284949323 100644
--- a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
@@ -67,10 +67,13 @@ void CLLocallyConnectedLayer::configure(const ICLTensor *input, const ICLTensor
std::tie(stride_x, stride_y) = conv_info.stride();
std::tie(pad_x, pad_y) = conv_info.pad();
+ const unsigned int kernel_width = weights->info()->dimension(0);
+ const unsigned int kernel_height = weights->info()->dimension(1);
+
// Get convolved dimensions
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0), weights->info()->dimension(1),
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
conv_info);
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
@@ -106,7 +109,7 @@ void CLLocallyConnectedLayer::configure(const ICLTensor *input, const ICLTensor
_memory_group.manage(&_gemm_output);
// Configure kernels
- _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(conv_w, conv_h), conv_info, _has_bias);
+ _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
_weights_reshape_kernel.configure(weights, biases, &_weights_reshaped);
_mm_kernel.configure(&_input_im2col_reshaped, &_weights_reshaped, &_gemm_output);
_output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h));
diff --git a/tests/datasets/LocallyConnectedDataset.h b/tests/datasets/LocallyConnectedDataset.h
new file mode 100644
index 0000000000..cc2fa88f02
--- /dev/null
+++ b/tests/datasets/LocallyConnectedDataset.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_LOCALLYCONNECTED_DATASET
+#define ARM_COMPUTE_TEST_LOCALLYCONNECTED_DATASET
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+#include "tests/datasets/ConvolutionLayerDataset.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class SmallLocallyConnectedDataset final : public ConvolutionLayerDataset
+{
+public:
+ SmallLocallyConnectedDataset()
+ {
+ // Batch size 1
+ add_config(TensorShape(23U, 27U, 5U), TensorShape(3U, 3U, 5U, 21U, 275U), TensorShape(21U, 275U), TensorShape(11U, 25U, 21U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(17U, 31U, 2U), TensorShape(5U, 5U, 2U, 19U, 225U), TensorShape(19U, 225U), TensorShape(15U, 15U, 19U), PadStrideInfo(1, 2, 1, 1));
+ add_config(TensorShape(17U, 31U, 2U), TensorShape(5U, 3U, 2U, 19U, 240U), TensorShape(19U, 240U), TensorShape(15U, 16U, 19U), PadStrideInfo(1, 2, 1, 1));
+ // Batch size 4
+ add_config(TensorShape(23U, 27U, 5U, 4U), TensorShape(3U, 3U, 5U, 21U, 275U), TensorShape(21U, 275U), TensorShape(11U, 25U, 21U, 4U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(17U, 31U, 2U, 4U), TensorShape(5U, 5U, 2U, 19U, 225U), TensorShape(19U, 225U), TensorShape(15U, 15U, 19U, 4U), PadStrideInfo(1, 2, 1, 1));
+ add_config(TensorShape(17U, 31U, 2U, 4U), TensorShape(5U, 3U, 2U, 19U, 240U), TensorShape(19U, 240U), TensorShape(15U, 16U, 19U, 4U), PadStrideInfo(1, 2, 1, 1));
+ // FC convolution
+ add_config(TensorShape(1U, 1U, 1024U), TensorShape(1U, 1U, 1024U, 1001U, 1U), TensorShape(1001U, 1U), TensorShape(1U, 1U, 1001U), PadStrideInfo(1, 1, 0, 0));
+ }
+};
+
+class LargeLocallyConnectedDataset final : public ConvolutionLayerDataset
+{
+public:
+ LargeLocallyConnectedDataset()
+ {
+ // Batch size 1
+ add_config(TensorShape(23U, 27U, 5U), TensorShape(3U, 1U, 5U, 21U, 297U), TensorShape(21U, 297U), TensorShape(11U, 27U, 21U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(33U, 27U, 7U), TensorShape(5U, 5U, 7U, 16U, 132U), TensorShape(16U, 132U), TensorShape(11U, 12U, 16U), PadStrideInfo(3, 2, 1, 0));
+ add_config(TensorShape(33U, 27U, 7U), TensorShape(5U, 7U, 7U, 16U, 121U), TensorShape(16U, 121U), TensorShape(11U, 11U, 16U), PadStrideInfo(3, 2, 1, 0));
+ // Batch size 4
+ add_config(TensorShape(23U, 27U, 5U, 4U), TensorShape(3U, 1U, 5U, 21U, 297U), TensorShape(21U, 297U), TensorShape(11U, 27U, 21U, 4U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(33U, 27U, 7U, 4U), TensorShape(5U, 5U, 7U, 16U, 132U), TensorShape(16U, 132U), TensorShape(11U, 12U, 16U, 4U), PadStrideInfo(3, 2, 1, 0));
+ add_config(TensorShape(33U, 27U, 7U, 4U), TensorShape(5U, 7U, 7U, 16U, 121U), TensorShape(16U, 121U), TensorShape(11U, 11U, 16U, 4U), PadStrideInfo(3, 2, 1, 0));
+ // Arbitrary batch size
+ add_config(TensorShape(33U, 27U, 7U, 5U), TensorShape(5U, 7U, 7U, 16U, 121U), TensorShape(16U, 121U), TensorShape(11U, 11U, 16U, 5U), PadStrideInfo(3, 2, 1, 0));
+ }
+};
+
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_LOCALLYCONNECTED_DATASET */
diff --git a/tests/validation/CL/LocallyConnected.cpp b/tests/validation/CL/LocallyConnected.cpp
new file mode 100644
index 0000000000..05cab29226
--- /dev/null
+++ b/tests/validation/CL/LocallyConnected.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLLocallyConnectedLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/LocallyConnectedDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/LocallyConnectedFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(LocallyConnected)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallLocallyConnectedDataset(), datasets::LargeLocallyConnectedDataset()),
+ framework::dataset::make("DataType", DataType::F32)),
+ src_shape, weights_shape, bias_shape, dst_shape, info, data_type)
+{
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(src_shape, data_type);
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type);
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
+ CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function.
+ CLLocallyConnectedLayer lc;
+ lc.configure(&src, &weights, &bias, &dst, info);
+
+ // Validate valid region
+ const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
+ validate(dst.info()->valid_region(), dst_valid_region);
+}
+
+template <typename T>
+using CLLocallyConnectedFixture = LocallyConnectedValidationFixture<CLTensor, CLAccessor, CLLocallyConnectedLayer, T>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CLLocallyConnectedFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallLocallyConnectedDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLLocallyConnectedFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeLocallyConnectedDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/LocallyConnected.cpp b/tests/validation/NEON/LocallyConnected.cpp
new file mode 100644
index 0000000000..56430d9650
--- /dev/null
+++ b/tests/validation/NEON/LocallyConnected.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NELocallyConnectedLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/LocallyConnectedDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/LocallyConnectedFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+constexpr RelativeTolerance<float> tolerance_f32(0.0001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(LocallyConnected)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallLocallyConnectedDataset(), datasets::LargeLocallyConnectedDataset()),
+ framework::dataset::make("DataType", DataType::F32)),
+ src_shape, weights_shape, bias_shape, dst_shape, info, data_type)
+{
+ // Create tensors
+ Tensor src = create_tensor<Tensor>(src_shape, data_type);
+ Tensor weights = create_tensor<Tensor>(weights_shape, data_type);
+ Tensor bias = create_tensor<Tensor>(bias_shape, data_type);
+ Tensor dst = create_tensor<Tensor>(dst_shape, data_type);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function.
+ NELocallyConnectedLayer lc;
+ lc.configure(&src, &weights, &bias, &dst, info);
+
+ // Validate valid region
+ const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
+ validate(dst.info()->valid_region(), dst_valid_region);
+}
+
+template <typename T>
+using NELocallyConnectedFixture = LocallyConnectedValidationFixture<Tensor, Accessor, NELocallyConnectedLayer, T>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NELocallyConnectedFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallLocallyConnectedDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NELocallyConnectedFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeLocallyConnectedDataset(),
+ framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/LocallyConnectedFixture.h b/tests/validation/fixtures/LocallyConnectedFixture.h
new file mode 100644
index 0000000000..ab9819e56f
--- /dev/null
+++ b/tests/validation/fixtures/LocallyConnectedFixture.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_LOCALLY_CONNECTED_FIXTURE
+#define ARM_COMPUTE_TEST_LOCALLY_CONNECTED_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/LocallyConnected.h"
+#include "tests/validation/reference/Utils.h"
+
+#include <random>
+
+namespace arm_compute
+{
+class NELocallyConnected;
+
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LocallyConnectedValidationFixture : public framework::Fixture
+{
+public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, DataType data_type)
+ {
+ _data_type = data_type;
+ _bias_data_type = data_type;
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
+ {
+ TensorShape reshaped_weights_shape(weights_shape);
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type);
+
+ // Create and configure function
+ FunctionType locally_connected;
+ locally_connected.configure(&src, &weights, &bias, &dst, info);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src), 0);
+ fill(AccessorType(weights), 1);
+ fill(AccessorType(bias), 2);
+
+ locally_connected.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
+ {
+ // Create reference
+ SimpleTensor<T> src(input_shape, _data_type);
+ SimpleTensor<T> weights(weights_shape, _data_type);
+ SimpleTensor<TBias> bias(bias_shape, _bias_data_type);
+
+ // Fill reference
+ fill(src, 0);
+ fill(weights, 1);
+ fill(bias, 2);
+
+ return reference::locally_connected<T>(src, weights, bias, output_shape, info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataType _bias_data_type{};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_LOCALLY_CONNECTED_FIXTURE */
diff --git a/tests/validation/reference/Convolution3d.h b/tests/validation/reference/Convolution3d.h
new file mode 100644
index 0000000000..b99d534635
--- /dev/null
+++ b/tests/validation/reference/Convolution3d.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H__
+#define __ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H__
+
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/UtilsQuantizedAsymm.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace convolution_3d
+{
+namespace detail
+{
+inline bool is_valid_pixel(int i, int min, int max)
+{
+ return (i >= min && i < max);
+}
+
+// 3D convolution for floating point type
+template < typename T, typename TB, typename std::enable_if < validation::is_floating_point<T>::value &&validation::is_floating_point<TB>::value, int >::type = 0 >
+inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
+ int i_offset, int w_offset, int b_offset, int o_offset,
+ int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
+{
+ const T *in_ptr = in.data() + i_offset;
+ const T *w_ptr = weights.data() + w_offset;
+ const TB *b_ptr = bias.data() + b_offset;
+ T *out_ptr = out.data() + o_offset;
+
+ const int half_width_weights_start = width_weights / 2;
+ const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
+ const int half_height_weights_start = height_weights / 2;
+ const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
+
+ // Reset accumulator
+ T acc(0);
+
+ // Compute a 2D convolution for each IFM and accumulate the result
+ for(int ifm = 0; ifm < depth_in; ++ifm)
+ {
+ // Compute the offset for the input slice
+ const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
+
+ // Compute 2D convolution
+ for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
+ {
+ for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
+ {
+ // Check if the pixel is out-of-bound
+ if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
+ {
+ const int idx = xk + half_width_weights_start;
+ const int idy = yk + half_height_weights_start;
+
+ const T i_value = in_ptr[offset_slice_in + xk + yk * width_in];
+ const T w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];
+
+ acc += i_value * w_value;
+ }
+ }
+ }
+ }
+
+ // Accumulate the bias and store the result
+ *out_ptr = acc + (*b_ptr);
+}
+
+// 3D convolution for fixed point type
+template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
+inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
+ int i_offset, int w_offset, int b_offset, int o_offset,
+ int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
+{
+ const T *in_ptr = in.data() + i_offset;
+ const T *w_ptr = weights.data() + w_offset;
+ const T *b_ptr = bias.data() + b_offset;
+ T *out_ptr = out.data() + o_offset;
+ int fixed_point_position = in.fixed_point_position();
+
+ const int half_width_weights_start = width_weights / 2;
+ const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
+ const int half_height_weights_start = height_weights / 2;
+ const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
+
+ using namespace fixed_point_arithmetic;
+ using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
+
+ // Reset accumulator
+ fixed_point<promoted_type> acc(0, fixed_point_position);
+
+ // Compute a 2D convolution for each IFM and accumulate the result
+ for(int ifm = 0; ifm < depth_in; ++ifm)
+ {
+ // Compute the offset for the input slice
+ const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
+
+ // Compute 2D convolution
+ for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
+ {
+ for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
+ {
+ // Check if the pixel is out-of-bound
+ if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
+ {
+ const int idx = xk + half_width_weights_start;
+ const int idy = yk + half_height_weights_start;
+
+ const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
+ const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
+ const fixed_point<promoted_type> iw = i_value * w_value;
+ acc = iw + acc;
+ }
+ }
+ }
+ }
+
+ // Get the bias
+ const fixed_point<promoted_type> b(*b_ptr, fixed_point_position, true);
+
+ // Accumulate the bias and convert back
+ acc = acc + b;
+ fixed_point<T> res(acc);
+ *out_ptr = res.raw();
+}
+
+// 3D convolution for QASYMM8 type
+template <>
+inline void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &out,
+ int i_offset, int w_offset, int b_offset, int o_offset,
+ int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
+{
+ const uint8_t *in_ptr = in.data() + i_offset;
+ const uint8_t *w_ptr = weights.data() + w_offset;
+ const int32_t *b_ptr = bias.data() + b_offset;
+ uint8_t *out_ptr = out.data() + o_offset;
+
+ const int input_offset = -in.quantization_info().offset;
+ const float input_scale = in.quantization_info().scale;
+ const int weights_offset = -weights.quantization_info().offset;
+ const float weights_scale = weights.quantization_info().scale;
+ const int output_offset = out.quantization_info().offset;
+ const float output_scale = out.quantization_info().scale;
+
+ int output_multiplier = 0;
+ int output_shift = 0;
+ const float multiplier = input_scale * weights_scale / output_scale;
+ arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+ const int half_width_weights_start = width_weights / 2;
+ const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
+ const int half_height_weights_start = height_weights / 2;
+ const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
+
+ // Reset accumulator
+ int32_t acc(0);
+
+ // Compute a 2D convolution for each IFM and accumulate the result
+ for(int ifm = 0; ifm < depth_in; ++ifm)
+ {
+ // Compute the offset for the input slice
+ const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
+
+ // Compute 2D convolution
+ for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
+ {
+ for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
+ {
+ // Check if the pixel is out-of-bound
+ if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
+ {
+ const int idx = xk + half_width_weights_start;
+ const int idy = yk + half_height_weights_start;
+
+ const uint8_t i_value = in_ptr[offset_slice_in + xk + yk * width_in];
+ const uint8_t w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];
+
+ acc += (i_value + input_offset) * (w_value + weights_offset);
+ }
+ }
+ }
+ }
+
+ // Accumulate the bias
+ acc += (*b_ptr);
+
+ acc = validation::asymm_rounding_divide_by_pow2(validation::asymm_int_mult(acc, output_multiplier), output_shift);
+ acc += output_offset;
+ acc = utility::clamp<int32_t>(acc, 0, 255);
+
+ // Store the result
+ *out_ptr = acc;
+}
+} // namespace detail
+} // namespace convolution_3d
+} // namespace test
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H__ */
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index b7ed2f56c0..24bbf32a30 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -25,6 +25,7 @@
#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/Convolution3d.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/UtilsQuantizedAsymm.h"
@@ -42,185 +43,6 @@ namespace reference
{
namespace
{
-inline bool is_valid_pixel(int i, int min, int max)
-{
- return (i >= min && i < max);
-}
-
-// 3D convolution for floating point type
-template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
-void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
- int i_offset, int w_offset, int b_offset, int o_offset,
- int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
-{
- const T *in_ptr = in.data() + i_offset;
- const T *w_ptr = weights.data() + w_offset;
- const TB *b_ptr = bias.data() + b_offset;
- T *out_ptr = out.data() + o_offset;
-
- const int half_width_weights_start = width_weights / 2;
- const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
- const int half_height_weights_start = height_weights / 2;
- const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
-
- // Reset accumulator
- T acc(0);
-
- // Compute a 2D convolution for each IFM and accumulate the result
- for(int ifm = 0; ifm < depth_in; ++ifm)
- {
- // Compute the offset for the input slice
- const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
-
- // Compute 2D convolution
- for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
- {
- for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
- {
- // Check if the pixel is out-of-bound
- if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
- {
- const int idx = xk + half_width_weights_start;
- const int idy = yk + half_height_weights_start;
-
- const T i_value = in_ptr[offset_slice_in + xk + yk * width_in];
- const T w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];
-
- acc += i_value * w_value;
- }
- }
- }
- }
-
- // Accumulate the bias and store the result
- *out_ptr = acc + (*b_ptr);
-}
-
-// 3D convolution for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
-void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
- int i_offset, int w_offset, int b_offset, int o_offset,
- int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
-{
- const T *in_ptr = in.data() + i_offset;
- const T *w_ptr = weights.data() + w_offset;
- const T *b_ptr = bias.data() + b_offset;
- T *out_ptr = out.data() + o_offset;
- int fixed_point_position = in.fixed_point_position();
-
- const int half_width_weights_start = width_weights / 2;
- const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
- const int half_height_weights_start = height_weights / 2;
- const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
-
- using namespace fixed_point_arithmetic;
- using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
- // Reset accumulator
- fixed_point<promoted_type> acc(0, fixed_point_position);
-
- // Compute a 2D convolution for each IFM and accumulate the result
- for(int ifm = 0; ifm < depth_in; ++ifm)
- {
- // Compute the offset for the input slice
- const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
-
- // Compute 2D convolution
- for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
- {
- for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
- {
- // Check if the pixel is out-of-bound
- if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
- {
- const int idx = xk + half_width_weights_start;
- const int idy = yk + half_height_weights_start;
-
- const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
- const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
- const fixed_point<promoted_type> iw = i_value * w_value;
- acc = iw + acc;
- }
- }
- }
- }
-
- // Get the bias
- const fixed_point<promoted_type> b(*b_ptr, fixed_point_position, true);
-
- // Accumulate the bias and covert back
- acc = acc + b;
- fixed_point<T> res(acc);
- *out_ptr = res.raw();
-}
-
-// 3D convolution for QASYMM8 type
-template <>
-void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &out,
- int i_offset, int w_offset, int b_offset, int o_offset,
- int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
-{
- const uint8_t *in_ptr = in.data() + i_offset;
- const uint8_t *w_ptr = weights.data() + w_offset;
- const int32_t *b_ptr = bias.data() + b_offset;
- uint8_t *out_ptr = out.data() + o_offset;
-
- const int input_offset = -in.quantization_info().offset;
- const float input_scale = in.quantization_info().scale;
- const int weights_offset = -weights.quantization_info().offset;
- const float weights_scale = weights.quantization_info().scale;
- const int output_offset = out.quantization_info().offset;
- const float output_scale = out.quantization_info().scale;
-
- int output_multiplier = 0;
- int output_shift = 0;
- const float multiplier = input_scale * weights_scale / output_scale;
- arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
- const int half_width_weights_start = width_weights / 2;
- const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
- const int half_height_weights_start = height_weights / 2;
- const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
-
- // Reset accumulator
- int32_t acc(0);
-
- // Compute a 2D convolution for each IFM and accumulate the result
- for(int ifm = 0; ifm < depth_in; ++ifm)
- {
- // Compute the offset for the input slice
- const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
-
- // Compute 2D convolution
- for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
- {
- for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
- {
- // Check if the pixel is out-of-bound
- if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
- {
- const int idx = xk + half_width_weights_start;
- const int idy = yk + half_height_weights_start;
-
- const uint8_t i_value = in_ptr[offset_slice_in + xk + yk * width_in];
- const uint8_t w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];
-
- acc += (i_value + input_offset) * (w_value + weights_offset);
- }
- }
- }
- }
-
- // Accumulate the bias
- acc += (*b_ptr);
-
- acc = asymm_rounding_divide_by_pow2(asymm_int_mult(acc, output_multiplier), output_shift);
- acc += output_offset;
- acc = utility::clamp<int32_t>(acc, 0, 255);
-
- // Store the result
- *out_ptr = acc;
-}
} // namespace
template <typename T, typename TB>
@@ -270,11 +92,11 @@ SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor
ARM_COMPUTE_ASSERT(yo < height_out);
// Compute 3D convolution
- convolution3d(src, weights, bias, dst,
- offset_in, ofm * width_weights * height_weights * depth_weights, ofm, offset_out,
- xi, yi,
- width_in, height_in, depth_in,
- width_weights, height_weights);
+ convolution_3d::detail::convolution3d(src, weights, bias, dst,
+ offset_in, ofm * width_weights * height_weights * depth_weights, ofm, offset_out,
+ xi, yi,
+ width_in, height_in, depth_in,
+ width_weights, height_weights);
}
}
}
diff --git a/tests/validation/reference/LocallyConnected.cpp b/tests/validation/reference/LocallyConnected.cpp
new file mode 100644
index 0000000000..08e3f02761
--- /dev/null
+++ b/tests/validation/reference/LocallyConnected.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "LocallyConnected.h"
+
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/Convolution3d.h"
+#include "tests/validation/reference/Utils.h"
+
+#include "tests/framework/Asserts.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename TB>
+SimpleTensor<T> locally_connected(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
+{
+ // Create reference
+ SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+
+ // Compute reference
+ const int width_in = src.shape().x();
+ const int height_in = src.shape().y();
+ const int depth_in = src.shape().z();
+
+ const int width_out = dst.shape().x();
+ const int height_out = dst.shape().y();
+ const int depth_out = dst.shape().z();
+
+ const int width_weights = weights.shape().x();
+ const int height_weights = weights.shape().y();
+ const int depth_weights = weights.shape().z();
+
+ const int pad_left = info.pad_left();
+ const int pad_top = info.pad_top();
+ const int stride_xi = info.stride().first;
+ const int stride_yi = info.stride().second;
+
+ auto output_wh = scaled_dimensions(width_in, height_in, width_weights, height_weights, info);
+
+ const int start_xi = width_weights / 2 - pad_left;
+ const int start_yi = height_weights / 2 - pad_top;
+ const int end_xi = output_wh.first * stride_xi;
+ const int end_yi = output_wh.second * stride_yi;
+ const int num_batches = src.shape().total_size() / (width_in * height_in * depth_in);
+
+ for(int r = 0; r < num_batches; ++r)
+ {
+ int count = 0;
+ for(int yi = start_yi; yi < start_yi + end_yi; yi += stride_yi)
+ {
+ for(int xi = start_xi; xi < start_xi + end_xi; xi += stride_xi)
+ {
+ for(int ofm = 0; ofm < depth_out; ++ofm)
+ {
+ // Compute input and output offsets
+ const int offset_in = r * width_in * height_in * depth_in;
+ const int xo = (xi - start_xi) / stride_xi;
+ const int yo = (yi - start_yi) / stride_yi;
+ const int offset_out = xo + yo * width_out + ofm * width_out * height_out + r * width_out * height_out * depth_out;
+
+ ARM_COMPUTE_ASSERT(xo < width_out);
+ ARM_COMPUTE_ASSERT(yo < height_out);
+
+ // Compute 3D convolution
+ convolution_3d::detail::convolution3d(src, weights, bias, dst,
+ offset_in, count * width_weights * height_weights * depth_weights, count, offset_out,
+ xi, yi,
+ width_in, height_in, depth_in,
+ width_weights, height_weights);
+ count++;
+ }
+ }
+ }
+ }
+
+ return dst;
+}
+
+// Locally Connected only supports F32
+template SimpleTensor<float> locally_connected(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &output_shape,
+ const PadStrideInfo &info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/LocallyConnected.h b/tests/validation/reference/LocallyConnected.h
new file mode 100644
index 0000000000..bf78d2c02a
--- /dev/null
+++ b/tests/validation/reference/LocallyConnected.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_LOCALLY_CONNECTED_H__
+#define __ARM_COMPUTE_TEST_LOCALLY_CONNECTED_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename TB>
+SimpleTensor<T> locally_connected(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_LOCALLY_CONNECTED_H__ */