Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/NEON/InstanceNormalizationLayer.cpp            140
-rw-r--r--  tests/validation/fixtures/InstanceNormalizationLayerFixture.h   147
-rw-r--r--  tests/validation/reference/InstanceNormalizationLayer.cpp        96
-rw-r--r--  tests/validation/reference/InstanceNormalizationLayer.h          44
4 files changed, 427 insertions(+), 0 deletions(-)
diff --git a/tests/validation/NEON/InstanceNormalizationLayer.cpp b/tests/validation/NEON/InstanceNormalizationLayer.cpp
new file mode 100644
index 0000000000..8356613368
--- /dev/null
+++ b/tests/validation/NEON/InstanceNormalizationLayer.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/InstanceNormalizationLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Tolerance for float operations */
+AbsoluteTolerance<float> tolerance_f32(0.001f);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+AbsoluteTolerance<float> tolerance_f16(0.2f);
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(InstanceNormalizationLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
+ }),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F16),
+ TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+ input_info, output_info, expected)
+{
+ bool is_valid = bool(NEInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false)
+ ));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using NEInstanceNormalizationLayerFixture = InstanceNormalizationLayerValidationFixture<Tensor, Accessor, NEInstanceNormalizationLayer, T>;
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::Small4DShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("InPlace", { false, true })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::Large4DShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("InPlace", { false, true })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("InPlace", { false, true })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("InPlace", { false, true })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+TEST_SUITE_END() // InstanceNormalizationLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
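
For context, the test suite above drives NEInstanceNormalizationLayer through the fixture added below, using the usual configure/allocate/run sequence. A minimal standalone use of the function, mirroring that sequence, might look like the following sketch; the shape and the gamma/beta/epsilon values are illustrative assumptions, and passing nullptr as the output would request in-place execution, as the fixture does.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void instance_norm_sketch()
    {
        // Illustrative NCHW shape and parameters (assumptions, not taken from the patch)
        Tensor src{};
        Tensor dst{};
        src.allocator()->init(TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32));

        // Same configure(input, output, gamma, beta, epsilon) signature the fixture exercises
        NEInstanceNormalizationLayer norm{};
        norm.configure(&src, &dst, /*gamma=*/1.0f, /*beta=*/0.0f, /*epsilon=*/1e-12f);

        // Allocate backing memory, fill src with input data, then run
        src.allocator()->allocate();
        dst.allocator()->allocate();
        norm.run();
    }
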
diff --git a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
new file mode 100644
index 0000000000..175ef2fb90
--- /dev/null
+++ b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_INSTANCENORMALIZATION_FIXTURE
+#define ARM_COMPUTE_TEST_INSTANCENORMALIZATION_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/InstanceNormalizationLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class InstanceNormalizationLayerValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place)
+ {
+ _target = compute_target(shape, data_type, data_layout, in_place);
+ _reference = compute_reference(shape, data_type, data_layout);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ std::uniform_real_distribution<> distribution(1.f, 2.f);
+ library->fill(tensor, distribution, 0);
+ }
+
+ TensorType compute_target(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place)
+ {
+ if(data_layout == DataLayout::NHWC)
+ {
+ permute(shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> dist_gamma(1.f, 2.f);
+ std::uniform_real_distribution<float> dist_beta(-2.f, 2.f);
+ std::uniform_real_distribution<float> dist_epsilon(1e-16f, 1e-12f);
+
+ const float gamma = dist_gamma(gen);
+ const float beta = dist_beta(gen);
+ const float epsilon = dist_epsilon(gen);
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), data_layout);
+
+ // Create and configure function
+ FunctionType instance_norm_func;
+ instance_norm_func.configure(&src, in_place ? nullptr : &dst, gamma, beta, epsilon);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ if(!in_place)
+ {
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ if(!in_place)
+ {
+ dst.allocator()->allocate();
+ }
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ if(!in_place)
+ {
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ instance_norm_func.run();
+
+ if(in_place)
+ {
+ return src;
+ }
+ else
+ {
+ return dst;
+ }
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, DataLayout data_layout)
+ {
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> dist_gamma(1.f, 2.f);
+ std::uniform_real_distribution<float> dist_beta(-2.f, 2.f);
+ std::uniform_real_distribution<float> dist_epsilon(1e-16f, 1e-12f);
+
+ const float gamma = dist_gamma(gen);
+ const float beta = dist_beta(gen);
+ const float epsilon = dist_epsilon(gen);
+
+ // Create reference
+ SimpleTensor<T> src{ shape, data_type };
+
+ // Fill reference
+ fill(src);
+
+ return reference::instance_normalization<T>(src, gamma, beta, epsilon);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_INSTANCENORMALIZATION_FIXTURE */
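
One subtlety in the fixture above: compute_target() and compute_reference() each draw gamma, beta and epsilon from their own std::mt19937, but both engines are seeded with library->seed() and perform the draws in the same order, so the function under test and the reference receive identical parameters. A minimal sketch of that seeding pattern (the seed value here is an arbitrary assumption):

    #include <cassert>
    #include <random>

    int main()
    {
        const unsigned int seed = 42U; // stand-in for library->seed()
        std::mt19937 gen_target(seed);
        std::mt19937 gen_reference(seed);
        std::uniform_real_distribution<float> dist_gamma(1.f, 2.f);

        // Same seed, same draw order: both sides observe the same value within one run
        assert(dist_gamma(gen_target) == dist_gamma(gen_reference));
        return 0;
    }
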
diff --git a/tests/validation/reference/InstanceNormalizationLayer.cpp b/tests/validation/reference/InstanceNormalizationLayer.cpp
new file mode 100644
index 0000000000..0e5c02aa99
--- /dev/null
+++ b/tests/validation/reference/InstanceNormalizationLayer.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "InstanceNormalizationLayer.h"
+
+#include "tests/validation/Helpers.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> instance_normalization(const SimpleTensor<T> &src, float gamma, float beta, float epsilon)
+{
+ SimpleTensor<T> dst{ src.shape(), src.data_type() };
+
+ //NCHW
+ const size_t w_size = src.shape()[0];
+ const size_t h_size = src.shape()[1];
+ const size_t c_size = src.shape()[2];
+ const size_t n_size = src.shape()[3];
+
+ for(size_t n_i = 0; n_i < n_size; ++n_i)
+ {
+ for(size_t c_i = 0; c_i < c_size; ++c_i)
+ {
+ float sum_h_w = 0;
+ //Compute mean
+ for(size_t h_i = 0; h_i < h_size; ++h_i)
+ {
+ for(size_t w_i = 0; w_i < w_size; ++w_i)
+ {
+ sum_h_w += src[coord2index(src.shape(), Coordinates(w_i, h_i, c_i, n_i))];
+ }
+ }
+ const float mean_h_w = sum_h_w / (h_size * w_size);
+
+ //Compute variance
+ float partial_var_h_w = 0;
+ for(size_t h_i = 0; h_i < h_size; ++h_i)
+ {
+ for(size_t w_i = 0; w_i < w_size; ++w_i)
+ {
+ partial_var_h_w += std::pow(src[coord2index(src.shape(), Coordinates(w_i, h_i, c_i, n_i))] - mean_h_w, 2);
+ }
+ }
+ const float var_h_w = partial_var_h_w / (h_size * w_size);
+
+            //Apply normalization: (x - mean) * gamma / sqrt(var + epsilon) + beta
+ for(size_t h_i = 0; h_i < h_size; ++h_i)
+ {
+ for(size_t w_i = 0; w_i < w_size; ++w_i)
+ {
+ //Compute output
+ size_t index = coord2index(src.shape(), Coordinates(w_i, h_i, c_i, n_i));
+ dst[index] = (src[index] - mean_h_w) * gamma / std::sqrt(var_h_w + epsilon) + beta;
+ }
+ }
+ }
+ }
+ return dst;
+}
+
+template SimpleTensor<float> instance_normalization(const SimpleTensor<float> &src, float gamma, float beta, float epsilon);
+template SimpleTensor<half> instance_normalization(const SimpleTensor<half> &src, float gamma, float beta, float epsilon);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
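
Written out, the reference above computes, for every batch index n and channel c, the standard instance-normalization transform over the spatial H x W plane:

    \mu_{n,c} = \frac{1}{HW} \sum_{h,w} x_{n,c,h,w}
    \sigma^2_{n,c} = \frac{1}{HW} \sum_{h,w} \left( x_{n,c,h,w} - \mu_{n,c} \right)^2
    y_{n,c,h,w} = \gamma \, \frac{x_{n,c,h,w} - \mu_{n,c}}{\sqrt{\sigma^2_{n,c} + \epsilon}} + \beta
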
diff --git a/tests/validation/reference/InstanceNormalizationLayer.h b/tests/validation/reference/InstanceNormalizationLayer.h
new file mode 100644
index 0000000000..2926e09f1b
--- /dev/null
+++ b/tests/validation/reference/InstanceNormalizationLayer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_INSTANCENORMALIZATION_H__
+#define __ARM_COMPUTE_TEST_INSTANCENORMALIZATION_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> instance_normalization(const SimpleTensor<T> &src, float gamma, float beta, float epsilon);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_INSTANCENORMALIZATION_H__ */
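
Calling this reference directly follows the signature declared above and its use in the fixture; a short sketch, where the shape and parameter values are illustrative assumptions:

    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "tests/SimpleTensor.h"
    #include "tests/validation/reference/InstanceNormalizationLayer.h"

    using namespace arm_compute;
    using namespace arm_compute::test;
    using namespace arm_compute::test::validation;

    SimpleTensor<float> reference_sketch()
    {
        // Illustrative 4D shape laid out as (W, H, C, N), matching the NCHW indexing above
        SimpleTensor<float> src{ TensorShape(8U, 8U, 3U, 2U), DataType::F32 };

        // ... fill src with input data ...

        return reference::instance_normalization<float>(src, /*gamma=*/1.5f, /*beta=*/0.f, /*epsilon=*/1e-12f);
    }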