author    Michalis Spyrou <michalis.spyrou@arm.com>    2017-06-26 14:18:47 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-09-17 14:16:42 +0100
commit    172e57028ef14f2f8d6c56edc53c5c85f97e07cd (patch)
tree      b3fe8c05902f07fb2381cf6dfd893654c8ccb63f /tests
parent    579c0498e161215be1a36080b0b454e5198a992a (diff)
COMPMID-425 Port CLBatchnormalization to support QS8/QS16
Change-Id: I46c93305f377666ea0915ff789b7dfdfff596087
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78862
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp    228
-rw-r--r--  tests/validation/FixedPoint.h                         2
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp     9
-rw-r--r--  tests/validation/Reference.cpp                        9
-rw-r--r--  tests/validation/TensorOperations.h                  20
5 files changed, 255 insertions, 13 deletions
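
For context, the new tests validate the usual per-channel batch normalization transform, out = gamma * (in - mean) / sqrt(var + epsilon) + beta, in both floating point and fixed point. A minimal floating-point sketch of that reference computation follows; the function name and the flat channel-major layout are illustrative only, not the library's API:

// A minimal floating-point sketch of the per-channel transform these tests validate:
//     out = gamma * (in - mean) / sqrt(var + epsilon) + beta
#include <cmath>
#include <cstddef>
#include <vector>

void batch_norm_reference(const std::vector<float> &in, std::vector<float> &out,
                          const std::vector<float> &mean, const std::vector<float> &var,
                          const std::vector<float> &beta, const std::vector<float> &gamma,
                          float epsilon, std::size_t channels, std::size_t elems_per_channel)
{
    for(std::size_t c = 0; c < channels; ++c)
    {
        // One mean/var/beta/gamma value per channel, applied to every element in that channel
        const float inv_stddev = 1.f / std::sqrt(var[c] + epsilon);
        for(std::size_t e = 0; e < elems_per_channel; ++e)
        {
            const std::size_t pos = c * elems_per_channel + e;
            out[pos] = gamma[c] * (in[pos] - mean[c]) * inv_stddev + beta[c];
        }
    }
}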
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
new file mode 100644
index 0000000000..9b9df2e902
--- /dev/null
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "CL/CLAccessor.h"
+#include "Globals.h"
+#include "TensorLibrary.h"
+#include "TypePrinter.h"
+#include "Utils.h"
+#include "dataset/BatchNormalizationLayerDataset.h"
+#include "tests/validation/Helpers.h"
+#include "validation/Datasets.h"
+#include "validation/Reference.h"
+#include "validation/Validation.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+
+#include <random>
+
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::cl;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+const float tolerance_f = 1e-05; /**< Tolerance value for comparing reference's output against floating point implementation's output */
+const float tolerance_qs8  = 3;     /**< Tolerance value for comparing reference's output against quantized QS8 implementation's output */
+const float tolerance_qs16 = 6;     /**< Tolerance value for comparing reference's output against quantized QS16 implementation's output */
+
+/** Compute CL batch normalization function.
+ *
+ * @param[in] shape0               Shape of the input and output tensors.
+ * @param[in] shape1               Shape of the mean, variance, beta and gamma tensors.
+ * @param[in] dt                   Data type of input and output tensors.
+ * @param[in] epsilon              Small value added to the variance to avoid division by zero.
+ * @param[in] fixed_point_position (Optional) Fixed point position of the tensors.
+ *
+ * @return Computed output tensor.
+ */
+CLTensor compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position = 0)
+{
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(shape0, dt, 1, fixed_point_position);
+ CLTensor dst = create_tensor<CLTensor>(shape0, dt, 1, fixed_point_position);
+ CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+ CLTensor var = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+ CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+ CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+
+ // Create and configure function
+ CLBatchNormalizationLayer norm;
+ norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+ mean.allocator()->allocate();
+ var.allocator()->allocate();
+ beta.allocator()->allocate();
+ gamma.allocator()->allocate();
+
+ BOOST_TEST(!src.info()->is_resizable());
+ BOOST_TEST(!dst.info()->is_resizable());
+ BOOST_TEST(!mean.info()->is_resizable());
+ BOOST_TEST(!var.info()->is_resizable());
+ BOOST_TEST(!beta.info()->is_resizable());
+ BOOST_TEST(!gamma.info()->is_resizable());
+
+ // Fill tensors
+ if(dt == DataType::F32)
+ {
+ float min_bound = 0.f;
+ float max_bound = 0.f;
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<float>();
+ std::uniform_real_distribution<> distribution(min_bound, max_bound);
+ std::uniform_real_distribution<> distribution_var(0, max_bound);
+ library->fill(CLAccessor(src), distribution, 0);
+ library->fill(CLAccessor(mean), distribution, 1);
+ library->fill(CLAccessor(var), distribution_var, 0);
+ library->fill(CLAccessor(beta), distribution, 3);
+ library->fill(CLAccessor(gamma), distribution, 4);
+ }
+ else
+ {
+ int min_bound = 0;
+ int max_bound = 0;
+ if(dt == DataType::QS8)
+ {
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
+ }
+ else
+ {
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int16_t>(fixed_point_position);
+ }
+ std::uniform_int_distribution<> distribution(min_bound, max_bound);
+ std::uniform_int_distribution<> distribution_var(0, max_bound);
+ library->fill(CLAccessor(src), distribution, 0);
+ library->fill(CLAccessor(mean), distribution, 1);
+ library->fill(CLAccessor(var), distribution_var, 0);
+ library->fill(CLAccessor(beta), distribution, 3);
+ library->fill(CLAccessor(gamma), distribution, 4);
+ }
+
+ // Compute function
+ norm.run();
+
+ return dst;
+}
+} // namespace
+
+#ifndef DOXYGEN_SKIP_THIS
+BOOST_AUTO_TEST_SUITE(CL)
+BOOST_AUTO_TEST_SUITE(BatchNormalizationLayer)
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(Configuration, RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make({ DataType::QS8, DataType::QS16, DataType::F32 }), obj, dt)
+{
+ // Set fixed point position data type allowed
+ int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
+
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(obj.shape0, dt, 1, fixed_point_position);
+ CLTensor dst = create_tensor<CLTensor>(obj.shape0, dt, 1, fixed_point_position);
+ CLTensor mean = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
+ CLTensor var = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
+ CLTensor beta = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
+ CLTensor gamma = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
+
+ BOOST_TEST(src.info()->is_resizable());
+ BOOST_TEST(dst.info()->is_resizable());
+ BOOST_TEST(mean.info()->is_resizable());
+ BOOST_TEST(var.info()->is_resizable());
+ BOOST_TEST(beta.info()->is_resizable());
+ BOOST_TEST(gamma.info()->is_resizable());
+
+ // Create and configure function
+ CLBatchNormalizationLayer norm;
+ norm.configure(&src, &dst, &mean, &var, &beta, &gamma, obj.epsilon);
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(obj.shape0);
+ const ValidRegion valid_region_vec = shape_to_valid_region(obj.shape1);
+ validate(src.info()->valid_region(), valid_region);
+ validate(dst.info()->valid_region(), valid_region);
+ validate(mean.info()->valid_region(), valid_region_vec);
+ validate(var.info()->valid_region(), valid_region_vec);
+ validate(beta.info()->valid_region(), valid_region_vec);
+ validate(gamma.info()->valid_region(), valid_region_vec);
+}
+
+BOOST_AUTO_TEST_SUITE(Float)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(Random,
+ RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::F32),
+ obj, dt)
+{
+ // Compute function
+ CLTensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_f, 0);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(Quantized)
+
+BOOST_AUTO_TEST_SUITE(QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(Random,
+ RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 6),
+ obj, dt, fixed_point_position)
+{
+ // Compute function
+ CLTensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_qs8, 0);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_DATA_TEST_CASE(Random,
+ RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 14),
+ obj, dt, fixed_point_position)
+{
+ // Compute function
+ CLTensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_qs16, 0);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+#endif /* DOXYGEN_SKIP_THIS */
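
A note on the fixed_point_position ranges used above (xrange(1, 6) for QS8 and xrange(1, 14) for QS16): the position is the number of fractional bits, so it bounds the representable range of the signed 8- or 16-bit fixed-point values the tests generate. The stand-alone helper below is purely illustrative and not part of the library; it just prints the maximum representable value for a given configuration:

#include <cstdio>

// Illustrative only: maximum representable value of a signed fixed-point number
// with 'total_bits' bits and 'frac_bits' fractional bits.
// QS8 corresponds to total_bits = 8, QS16 to total_bits = 16.
double fixed_point_max(int total_bits, int frac_bits)
{
    const long long max_raw = (1LL << (total_bits - 1)) - 1; // e.g. 127 for QS8, 32767 for QS16
    return static_cast<double>(max_raw) / static_cast<double>(1LL << frac_bits);
}

int main()
{
    std::printf("QS8,  fixed_point_position 5:  max ~ %.4f\n", fixed_point_max(8, 5));   // ~3.9688
    std::printf("QS16, fixed_point_position 13: max ~ %.4f\n", fixed_point_max(16, 13)); // ~3.9999
    return 0;
}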
diff --git a/tests/validation/FixedPoint.h b/tests/validation/FixedPoint.h
index 261fcd6df6..ab6f14a49b 100644
--- a/tests/validation/FixedPoint.h
+++ b/tests/validation/FixedPoint.h
@@ -768,7 +768,7 @@ struct functions
x2 = shift_right(mul(x2, three_minus_dx), 1);
}
- return (shift < 0) ? shift_left(x2, -shift >> 1) : shift_right(x2, shift >> 1);
+ return (shift < 0) ? shift_left(x2, (-shift) >> 1) : shift_right(x2, shift >> 1);
}
/** Calculate the hyperbolic tangent of a fixed point number
*
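
The parenthesization change in FixedPoint.h above is presumably for readability rather than behaviour: unary minus already binds tighter than the shift operator, so '-shift >> 1' and '(-shift) >> 1' parse identically. A trivial stand-alone check of that precedence, illustrative only:

// Unary minus binds tighter than >>, so '-shift >> 1' already parses as '(-shift) >> 1'.
constexpr int shift = -7;
static_assert((-shift >> 1) == 3, "evaluates as 7 >> 1");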
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 38e3751db4..d825c889b6 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -100,7 +100,14 @@ Tensor compute_reference_batch_normalization_layer(const TensorShape &shape0, co
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
+ if(dt == DataType::QS8)
+ {
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
+ }
+ else
+ {
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int16_t>(fixed_point_position);
+ }
std::uniform_int_distribution<> distribution(min_bound, max_bound);
std::uniform_int_distribution<> distribution_var(0, max_bound);
library->fill(NEAccessor(src), distribution, 0);
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 65705b17de..acf010e7b7 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -505,7 +505,14 @@ RawTensor Reference::compute_reference_batch_normalization_layer(const TensorSha
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
+ if(dt == DataType::QS8)
+ {
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
+ }
+ else
+ {
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int16_t>(fixed_point_position);
+ }
std::uniform_int_distribution<> distribution(min_bound, max_bound);
std::uniform_int_distribution<> distribution_var(0, max_bound);
library->fill(ref_src, distribution, 0);
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 27c50cf6d2..9e6f5cf5d1 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -974,17 +974,17 @@ void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor
for(int l = 0; l < cols; ++l)
{
const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
- fixed_point_arithmetic::fixed_point<T> in_qs8(in[pos], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> var_qs8(var[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> mean_qs8(mean[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> beta_qs8(beta[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> gamma_qs8(gamma[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> epsilon_qs8(epsilon, fixed_point_position);
-
- auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs8 + epsilon_qs8);
- auto numerator = in_qs8 - mean_qs8;
+ fixed_point_arithmetic::fixed_point<T> in_qs(in[pos], fixed_point_position, true);
+ fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
+ fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
+ fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
+ fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
+ fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
+
+ auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
+ auto numerator = in_qs - mean_qs;
auto x_bar = numerator * denominator;
- x_bar = beta_qs8 + x_bar * gamma_qs8;
+ x_bar = beta_qs + x_bar * gamma_qs;
out[pos] = x_bar.raw();
}
}