Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayerNative.cpp       305
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h  121
2 files changed, 426 insertions, 0 deletions
diff --git a/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp b/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp
new file mode 100644
index 0000000000..bbcded9267
--- /dev/null
+++ b/tests/validation/CL/DepthwiseConvolutionLayerNative.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+// Create function for CLDepthwiseConvolutionLayerNativeKernel
+using CLDepthwiseConvolutionLayerNative = CLSynthetizeFunction<CLDepthwiseConvolutionLayerNativeKernel>;
+
+// Fixture for CLDepthwiseConvolutionLayerNative
+template <typename T>
+using CLDepthwiseConvolutionLayerNativeFixture = DepthwiseConvolutionLayerNativeConfigurableValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayerNative, T>;
+
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+RelativeTolerance<float> rel_tolerance_f32(0.001f);
+constexpr float abs_tolerance_f32(0.0001f);
+
+RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.01));
+
+/** Width values to test - Precommit */
+const auto width_values_precommit = framework::dataset::make("width", { 37U } );
+
+/** Width values to test - Nightly */
+const auto width_values_nightly = framework::dataset::make("width", { 53U, 47U } );
+
+/** Height values to test - Precommit */
+const auto height_values_precommit = framework::dataset::make("height", { 19U } );
+
+/** Height values to test - Nightly */
+const auto height_values_nightly = framework::dataset::make("height", { 39U, 43U } );
+
+/** Channel values to test - Precommit */
+const auto channel_values_precommit = framework::dataset::make("channels", { 15U });
+
+/** Channel values to test - Nightly */
+const auto channel_values_nightly = framework::dataset::make("channels", { 33U, 19U });
+
+/** Batch values to test - Precommit */
+const auto batch_values_precommit = framework::dataset::make("batch", { 1U, 2U });
+
+/** Batch values to test - Nightly */
+const auto batch_values_nightly = framework::dataset::make("batch", { 1U, 3U });
+
+/** Kernel size values to test - Precommit */
+const auto kernel_sz_values_precommit = framework::dataset::make("kernel_size", { Size2D(1U, 1U), Size2D(1U, 3U), Size2D(5U, 5U) });
+
+/** Kernel size values to test - Nightly */
+const auto kernel_sz_values_nightly = framework::dataset::make("kernel_size", { Size2D(3U, 5U), Size2D(5U, 1U), Size2D(1U, 7U), Size2D(9U, 7U) });
+
+/** Depth multiplier values to test - All */
+const auto depth_multiplier_values = framework::dataset::make("depth_multiplier", {3U});
+
+/** Dilation values to test - All */
+const auto dilation_values = framework::dataset::make("dilation", { Size2D(1U, 1U), Size2D(3U, 3U) });
+
+/** Stride values to test - All */
+const auto stride_values = framework::dataset::make("stride", { Size2D(1U, 1U), Size2D(3U, 2U) });
+
+/** Padding values to test - All */
+const auto padding_valid_values = framework::dataset::make("padding_valid", { true, false });
+
+/** Data type values to test - All */
+const auto data_type_values = framework::dataset::make("data_type", { DataType::F32, DataType::F16 });
+
+/** Data layout values to test - All */
+const auto data_layout_values = framework::dataset::make("data_layout", { DataLayout::NHWC });
+
+/** N0 values to test - Precommit */
+const auto n0_values_precommit = framework::dataset::make("N0", {2, 4});
+
+/** N0 values to test - Nightly */
+const auto n0_values_nightly = framework::dataset::make("N0", {3, 8});
+
+/** Activation values to test */
+const auto act_values = framework::dataset::make("Activation",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
+});
+
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(DepthwiseConvolutionLayerNative)
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_precommit,
+ height_values_precommit),
+ channel_values_precommit),
+ batch_values_precommit),
+ kernel_sz_values_precommit),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F32)),
+ data_layout_values),
+ act_values),
+ n0_values_precommit))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_nightly,
+ height_values_nightly),
+ channel_values_nightly),
+ batch_values_nightly),
+ kernel_sz_values_nightly),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F32)),
+ data_layout_values),
+ act_values),
+ n0_values_nightly))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_precommit,
+ height_values_precommit),
+ channel_values_precommit),
+ batch_values_precommit),
+ kernel_sz_values_precommit),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F16)),
+ data_layout_values),
+ act_values),
+ n0_values_precommit))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_nightly,
+ height_values_nightly),
+ channel_values_nightly),
+ batch_values_nightly),
+ kernel_sz_values_nightly),
+ framework::dataset::make("depth_multiplier", 1)),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F16)),
+ data_layout_values),
+ act_values),
+ n0_values_nightly))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Float
+TEST_SUITE(DepthMultiplier)
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_precommit,
+ height_values_precommit),
+ channel_values_precommit),
+ batch_values_precommit),
+ kernel_sz_values_precommit),
+ depth_multiplier_values),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F32)),
+ data_layout_values),
+ act_values),
+ framework::dataset::make("N0", 1)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_nightly,
+ height_values_nightly),
+ channel_values_nightly),
+ batch_values_nightly),
+ kernel_sz_values_nightly),
+ depth_multiplier_values),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F32)),
+ data_layout_values),
+ act_values),
+ framework::dataset::make("N0", 1)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_precommit,
+ height_values_precommit),
+ channel_values_precommit),
+ batch_values_precommit),
+ kernel_sz_values_precommit),
+ depth_multiplier_values),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F16)),
+ data_layout_values),
+ act_values),
+ framework::dataset::make("N0", 1)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerNativeFixture<half>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ width_values_nightly,
+ height_values_nightly),
+ channel_values_nightly),
+ batch_values_nightly),
+ kernel_sz_values_nightly),
+ depth_multiplier_values),
+ dilation_values),
+ stride_values),
+ padding_valid_values),
+ framework::dataset::make("DataType", DataType::F16)),
+ data_layout_values),
+ act_values),
+ framework::dataset::make("N0", 1)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // DepthMultiplier
+TEST_SUITE_END() // DepthwiseConvolutionLayerNative
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
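
Note on the dataset composition used in the test file above: each framework::dataset::combine(...) call produces the cartesian product of its two dataset arguments, so the twelve nested combine() calls enumerate every width x height x channels x batch x kernel_size x depth_multiplier x dilation x stride x padding x data_type x data_layout x activation x N0 configuration. A minimal sketch, reusing the includes and namespaces of the test file and assuming the cartesian-product behaviour described here (the name small_pairs is illustrative, not part of the patch):

// Sketch only: how two of the datasets above compose.
// combine() is assumed to build a cartesian product, so with the precommit
// values this yields the single pair (width=37, height=19); adding more
// values to either dataset multiplies the number of generated test cases.
const auto small_pairs = combine(framework::dataset::make("width",  { 37U }),
                                 framework::dataset::make("height", { 19U }));
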
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index a3ac49eef1..2c9b31866b 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -302,6 +302,127 @@ protected:
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DepthwiseConvolutionLayerNativeConfigurableValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type,
+ DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0)
+ {
+ const TensorShape src_shape(width, height, channel, batch);
+ const TensorShape weights_shape(kernel_size.width, kernel_size.height, channel * depth_multiplier);
+ const TensorShape biases_shape(weights_shape.z());
+
+ PadStrideInfo conv_info;
+ if(padding_valid)
+ {
+ conv_info = PadStrideInfo();
+ }
+ else
+ {
+ conv_info = calculate_same_pad(src_shape, weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, dilation);
+ }
+
+ _target = compute_target(src_shape, weights_shape, biases_shape, conv_info, dilation, depth_multiplier, data_type, data_layout, act_info, n0);
+ _reference = compute_reference(src_shape, weights_shape, biases_shape, conv_info, dilation, depth_multiplier, data_type, act_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F16:
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, PadStrideInfo &conv_info, Size2D dilation,
+ unsigned int depth_multiplier, const DataType data_type, const DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0)
+ {
+ if(data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType biases = create_tensor<TensorType>(biases_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(TensorShape(), data_type, 1, QuantizationInfo(), data_layout);
+
+ DWCWeightsKernelInfo dwc_weights_info;
+ dwc_weights_info.n0 = n0;
+
+ DWCKernelInfo dwc_info;
+ dwc_info.activation_info = act_info;
+
+ // Create Depthwise Convolution configure function
+ FunctionType dwc;
+ dwc.configure(&src, &weights, &biases, &dst, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(biases.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ biases.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!biases.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src), 0);
+ fill(AccessorType(weights), 1);
+ fill(AccessorType(biases), 2);
+
+ // Compute function
+ dwc.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const PadStrideInfo &conv_info,
+ const Size2D &dilation, unsigned int depth_multiplier, const DataType data_type, const ActivationLayerInfo &act_info)
+ {
+ SimpleTensor<T> src{ input_shape, data_type };
+ SimpleTensor<T> weights{ weights_shape, data_type };
+ SimpleTensor<T> biases{ biases_shape, data_type };
+
+ fill(src, 0);
+ fill(weights, 1);
+ fill(biases, 2);
+
+ const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(input_shape, 1, data_type), TensorInfo(weights_shape, 1, data_type), conv_info,
+ depth_multiplier, dilation);
+ return reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, conv_info, depth_multiplier, dilation), act_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public: