Diffstat (limited to 'tests/validation/NEON/FullyConnectedLayer.cpp')
-rw-r--r--	tests/validation/NEON/FullyConnectedLayer.cpp	193
1 file changed, 169 insertions(+), 24 deletions(-)
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 4bb48bf42c..8ba0f1f771 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,8 @@
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/cpu/operators/CpuFullyConnected.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/FullyConnectedLayerDataset.h"
@@ -94,6 +96,149 @@ const auto ActivationFunctionsQuantizedDataset = framework::dataset::make("Activ
TEST_SUITE(NEON)
TEST_SUITE(FullyConnectedLayer)
+/** Test case for memory injection in @ref cpu::CpuFullyConnected.
+ *
+ * Configure the operator once and inject memory at run-time in multiple executions.
+ *
+ * Checks performed in order:
+ * - Both runs compute the same output
+ */
+TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
+{
+ auto fc = std::make_unique<cpu::CpuFullyConnected>();
+ const auto src_info = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
+ const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto bias_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ auto dst_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto fc_info = FullyConnectedLayerInfo{};
+ fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);
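+ // With the operator-level API, configure() receives only TensorInfo
+ // descriptors; no tensor memory is bound to the operator at this point.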
+
+ // src, weight and bias are created once and shared by both executions;
+ // dst is created afresh inside the lambda below
+ auto src = create_tensor<Tensor>(src_info);
+ auto weight = create_tensor<Tensor>(weight_info);
+ auto bias = create_tensor<Tensor>(bias_info);
+ src.allocator()->allocate();
+ weight.allocator()->allocate();
+ bias.allocator()->allocate();
+
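+ // run_pack carries every tensor needed at run time, while prep_pack
+ // carries only the constant tensors consumed by prepare() (weights and
+ // bias, which are transformed once and then reused).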
+ ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
+ ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
+
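+ // manage_workspace() allocates the auxiliary tensors requested by the
+ // operator's workspace() requirements and registers them in the packs,
+ // so all memory injected into the operator is owned by the caller.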
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);
+
+ auto run_conv = [&]() -> Tensor
+ {
+ auto dst = create_tensor<Tensor>(dst_info);
+ dst.allocator()->allocate();
+ run_pack.add_tensor(TensorType::ACL_DST, &dst);
+
+ library->fill_tensor_value(Accessor(src), 1.f);
+ library->fill_tensor_value(Accessor(weight), 2.f);
+ library->fill_tensor_value(Accessor(bias), 3.f);
+ // This operator is configured once and captured by this lambda.
+ fc->prepare(prep_pack);
+ fc->run(run_pack);
+ return dst;
+ };
+ auto result_0 = run_conv();
+ auto result_1 = run_conv();
+ for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ }
+}
+
+/** Test case for memory injection in @ref NEFullyConnectedLayer.
+ *
+ * Make sure @ref NEFullyConnectedLayer still works when memory is injected at configure time through the old API.
+ *
+ * Checks performed in order:
+ * - Both runs compute the same output
+ */
+TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
+{
+ auto fc = std::make_unique<NEFullyConnectedLayer>();
+ const auto src_info = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
+ const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto bias_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ auto dst_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
+ const auto fc_info = FullyConnectedLayerInfo{};
+ auto run_conv = [&]()
+ {
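+ // Unlike the operator-level test above, the tensors themselves are
+ // passed to configure(), so memory is bound at configure time.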
+ auto src = create_tensor<Tensor>(src_info);
+ auto weight = create_tensor<Tensor>(weight_info);
+ auto bias = create_tensor<Tensor>(bias_info);
+ auto dst = create_tensor<Tensor>(dst_info);
+ fc->configure(&src, &weight, &bias, &dst, fc_info);
+ src.allocator()->allocate();
+ weight.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+ library->fill_tensor_value(Accessor(src), 1.f);
+ library->fill_tensor_value(Accessor(weight), 2.f);
+ library->fill_tensor_value(Accessor(bias), 3.f);
+ fc->run();
+ return dst;
+ };
+ auto result_0 = run_conv();
+ auto result_1 = run_conv();
+ for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
+ }
+}
+
+/** Unit test for @ref cpu::CpuFullyConnected with quantized multiplier > 1
+ *
+ * Tests output correctness.
+ */
+TEST_CASE(Quant8_Signed_Mult_gt_1, framework::DatasetMode::ALL)
+{
+ auto fc = std::make_unique<cpu::CpuFullyConnected>();
+ const auto src_info = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, -1));
+ const auto weight_info = TensorInfo(TensorShape(1U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5, -8));
+ const auto bias_info = TensorInfo(TensorShape(1U), 1, DataType::S32);
+ auto dst_info = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 0));
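+ // The effective requantization multiplier is
+ // (src_scale * weight_scale) / dst_scale = (0.5f * 0.5f) / 0.1f = 2.5f,
+ // i.e. greater than 1, which is the code path this test exercises.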
+ const auto fc_info = FullyConnectedLayerInfo{};
+ fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);
+
+ // Create and allocate all tensors consumed and produced by the operator
+ auto src = create_tensor<Tensor>(src_info);
+ auto weight = create_tensor<Tensor>(weight_info);
+ auto bias = create_tensor<Tensor>(bias_info);
+ auto dst = create_tensor<Tensor>(dst_info);
+ src.allocator()->allocate();
+ weight.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &dst } };
+ ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
+
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);
+
+ // Initialize input values
+ const std::vector<int8_t> src_values = { 3, 63, 31 };
+ const std::vector<int8_t> weight_values = { -4 };
+ const std::vector<int32_t> bias_values = { 16 };
+ const std::vector<int32_t> expected = { 80, 127, 127 };
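+ // The expected values can be checked by hand:
+ //   dequantized src    = (q - (-1)) * 0.5          -> { 2, 32, 16 }
+ //   dequantized weight = (-4 - (-8)) * 0.5         -> { 2 }
+ //   dequantized bias   = 16 * (0.5 * 0.5)          -> 4
+ //   accumulations      = { 2*2+4, 32*2+4, 16*2+4 } = { 8, 68, 36 }
+ //   requantized with (0.1, 0) = { 80, 680, 360 }   -> clamped to { 80, 127, 127 }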
+ library->fill_static_values(Accessor(src), src_values);
+ library->fill_static_values(Accessor(weight), weight_values);
+ library->fill_static_values(Accessor(bias), bias_values);
+
+ // Run FC layer
+ fc->prepare(prep_pack);
+ fc->run(run_pack);
+
+ auto dst_ptr = reinterpret_cast<int8_t *>(dst.buffer());
+ for(size_t i = 0; i < dst.info()->tensor_shape().total_size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(dst_ptr[i] == expected[i], framework::LogLevel::ERRORS);
+ }
+}
+
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
@@ -186,13 +331,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework:
validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
- framework::dataset::make("Weights", TensorShape(315U, 271U))),
- framework::dataset::make("Biases", TensorShape(271U))),
- framework::dataset::make("Output", TensorShape(271U))),
- FullyConnectedParameters),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
// Validate output
validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
@@ -235,14 +380,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<uint8_t>,
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
- framework::dataset::make("Weights", TensorShape(315U, 271U))),
- framework::dataset::make("Biases", TensorShape(271U))),
- framework::dataset::make("Output", TensorShape(271U))),
- FullyConnectedParameters),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- QuantizationData),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -282,14 +427,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<int8_t>,
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(combine(
- framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
- framework::dataset::make("Weights", TensorShape(315U, 271U))),
- framework::dataset::make("Biases", TensorShape(271U))),
- framework::dataset::make("Output", TensorShape(271U))),
- FullyConnectedParameters),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- QuantizationData),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);