Diffstat (limited to 'tests/validation/fixtures')
-rw-r--r--  tests/validation/fixtures/AbsoluteDifferenceFixture.h | 114
-rw-r--r--  tests/validation/fixtures/AccumulateFixture.h | 193
-rw-r--r--  tests/validation/fixtures/ActivationLayerFixture.h | 30
-rw-r--r--  tests/validation/fixtures/AddMulAddFixture.h | 270
-rw-r--r--  tests/validation/fixtures/ArgMinMaxFixture.h | 72
-rw-r--r--  tests/validation/fixtures/ArithmeticDivisionFixture.h | 21
-rw-r--r--  tests/validation/fixtures/ArithmeticOperationsFixture.h | 180
-rw-r--r--  tests/validation/fixtures/BatchNormalizationLayerFixture.h | 43
-rw-r--r--  tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h | 56
-rw-r--r--  tests/validation/fixtures/BatchToSpaceLayerFixture.h | 59
-rw-r--r--  tests/validation/fixtures/BitwiseAndFixture.h | 15
-rw-r--r--  tests/validation/fixtures/BitwiseNotFixture.h | 11
-rw-r--r--  tests/validation/fixtures/BitwiseOrFixture.h | 15
-rw-r--r--  tests/validation/fixtures/BitwiseXorFixture.h | 15
-rw-r--r--  tests/validation/fixtures/BoundingBoxTransformFixture.h | 15
-rw-r--r--  tests/validation/fixtures/Box3x3Fixture.h | 117
-rw-r--r--  tests/validation/fixtures/CannyEdgeFixture.h | 121
-rw-r--r--  tests/validation/fixtures/CastFixture.h | 22
-rw-r--r--  tests/validation/fixtures/ChannelCombineFixture.h | 266
-rw-r--r--  tests/validation/fixtures/ChannelExtractFixture.h | 192
-rw-r--r--  tests/validation/fixtures/ChannelShuffleLayerFixture.h | 11
-rw-r--r--  tests/validation/fixtures/Col2ImFixture.h | 22
-rw-r--r--  tests/validation/fixtures/ColorConvertFixture.h | 218
-rw-r--r--  tests/validation/fixtures/ComparisonFixture.h | 19
-rw-r--r--  tests/validation/fixtures/ComputeAllAnchorsFixture.h | 9
-rw-r--r--  tests/validation/fixtures/ConcatenateLayerFixture.h | 24
-rw-r--r--  tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h | 22
-rw-r--r--  tests/validation/fixtures/ConvolutionFixture.h | 235
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h | 619
-rw-r--r--  tests/validation/fixtures/CopyFixture.h | 13
-rw-r--r--  tests/validation/fixtures/CropResizeFixture.h | 12
-rw-r--r--  tests/validation/fixtures/DeconvolutionLayerFixture.h | 134
-rw-r--r--  tests/validation/fixtures/DepthConvertLayerFixture.h | 19
-rw-r--r--  tests/validation/fixtures/DepthToSpaceLayerFixture.h | 16
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h | 774
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h | 11
-rw-r--r--  tests/validation/fixtures/DerivativeFixture.h | 142
-rw-r--r--  tests/validation/fixtures/DilateFixture.h | 117
-rw-r--r--  tests/validation/fixtures/DirectConvolution3DFixture.h | 189
-rw-r--r--  tests/validation/fixtures/DirectConvolutionLayerFixture.h | 213
-rw-r--r--  tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h | 257
-rw-r--r--  tests/validation/fixtures/DropoutLayerFixture.h | 13
-rw-r--r--  tests/validation/fixtures/ElementWiseUnaryFixture.h | 242
-rw-r--r--  tests/validation/fixtures/ElementwiseOperationsFixture.h | 342
-rw-r--r--  tests/validation/fixtures/ElementwiseUnaryFixture.h | 447
-rw-r--r--  tests/validation/fixtures/EqualizeHistogramFixture.h | 106
-rw-r--r--  tests/validation/fixtures/ErodeFixture.h | 117
-rw-r--r--  tests/validation/fixtures/FFTFixture.h | 100
-rw-r--r--  tests/validation/fixtures/FastCornersFixture.h | 138
-rw-r--r--  tests/validation/fixtures/FillFixture.h | 3
-rw-r--r--  tests/validation/fixtures/FlattenLayerFixture.h | 16
-rw-r--r--  tests/validation/fixtures/FloorFixture.h | 11
-rw-r--r--  tests/validation/fixtures/FullyConnectedLayerFixture.h | 495
-rw-r--r--  tests/validation/fixtures/FuseBatchNormalizationFixture.h | 35
-rw-r--r--  tests/validation/fixtures/GEMMFixture.h | 923
-rw-r--r--  tests/validation/fixtures/GEMMInterleave4x4Fixture.h | 25
-rw-r--r--  tests/validation/fixtures/GEMMLowpAssemblyFixture.h | 141
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h | 1148
-rw-r--r--  tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h | 20
-rw-r--r--  tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h | 20
-rw-r--r--  tests/validation/fixtures/GEMMTranspose1xWFixture.h | 23
-rw-r--r--  tests/validation/fixtures/GatherFixture.h | 23
-rw-r--r--  tests/validation/fixtures/Gaussian3x3Fixture.h | 117
-rw-r--r--  tests/validation/fixtures/Gaussian5x5Fixture.h | 117
-rw-r--r--  tests/validation/fixtures/GaussianPyramidHalfFixture.h | 125
-rw-r--r--  tests/validation/fixtures/HOGDescriptorFixture.h | 135
-rw-r--r--  tests/validation/fixtures/HOGDetectorFixture.h | 138
-rw-r--r--  tests/validation/fixtures/HOGMultiDetectionFixture.h | 193
-rw-r--r--  tests/validation/fixtures/HarrisCornersFixture.h | 115
-rw-r--r--  tests/validation/fixtures/HistogramFixture.h | 129
-rw-r--r--  tests/validation/fixtures/Im2ColFixture.h | 22
-rw-r--r--  tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h | 122
-rw-r--r--  tests/validation/fixtures/InstanceNormalizationLayerFixture.h | 16
-rw-r--r--  tests/validation/fixtures/IntegralImageFixture.h | 106
-rw-r--r--  tests/validation/fixtures/L2NormalizeLayerFixture.h | 16
-rw-r--r--  tests/validation/fixtures/LSTMLayerFixture.h | 132
-rw-r--r--  tests/validation/fixtures/LaplacianPyramidFixture.h | 154
-rw-r--r--  tests/validation/fixtures/LaplacianReconstructFixture.h | 104
-rw-r--r--  tests/validation/fixtures/LocallyConnectedFixture.h | 136
-rw-r--r--  tests/validation/fixtures/LogicalFixture.h | 176
-rw-r--r--  tests/validation/fixtures/MagnitudeFixture.h | 123
-rw-r--r--  tests/validation/fixtures/MatMulFixture.h | 612
-rw-r--r--  tests/validation/fixtures/MatMulKernelFixture.h | 390
-rw-r--r--  tests/validation/fixtures/MaxUnpoolingLayerFixture.h | 162
-rw-r--r--  tests/validation/fixtures/MeanStdDevFixture.h | 110
-rw-r--r--  tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h | 47
-rw-r--r--  tests/validation/fixtures/Median3x3Fixture.h | 117
-rw-r--r--  tests/validation/fixtures/MinMaxLocationFixture.h | 122
-rw-r--r--  tests/validation/fixtures/NonLinearFilterFixture.h | 119
-rw-r--r--  tests/validation/fixtures/NonMaxSuppressionFixture.h | 19
-rw-r--r--  tests/validation/fixtures/NormalizationLayerFixture.h | 17
-rw-r--r--  tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h | 31
-rw-r--r--  tests/validation/fixtures/OpticalFlowFixture.h | 186
-rw-r--r--  tests/validation/fixtures/PadLayerFixture.h | 11
-rw-r--r--  tests/validation/fixtures/PermuteFixture.h | 11
-rw-r--r--  tests/validation/fixtures/PhaseFixture.h | 122
-rw-r--r--  tests/validation/fixtures/PixelWiseMultiplicationFixture.h | 128
-rw-r--r--  tests/validation/fixtures/Pooling3dLayerFixture.h | 164
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h | 97
-rw-r--r--  tests/validation/fixtures/PriorBoxLayerFixture.h | 15
-rw-r--r--  tests/validation/fixtures/QLSTMLayerNormalizationFixture.h | 7
-rw-r--r--  tests/validation/fixtures/QuantizationLayerFixture.h | 12
-rw-r--r--  tests/validation/fixtures/RNNLayerFixture.h | 32
-rw-r--r--  tests/validation/fixtures/ROIAlignLayerFixture.h | 17
-rw-r--r--  tests/validation/fixtures/ROIPoolingLayerFixture.h | 199
-rw-r--r--  tests/validation/fixtures/RangeFixture.h | 13
-rw-r--r--  tests/validation/fixtures/ReduceMeanFixture.h | 62
-rw-r--r--  tests/validation/fixtures/ReductionOperationFixture.h | 34
-rw-r--r--  tests/validation/fixtures/RemapFixture.h | 134
-rw-r--r--  tests/validation/fixtures/ReorderFixture.h | 123
-rw-r--r--  tests/validation/fixtures/ReorgLayerFixture.h | 11
-rw-r--r--  tests/validation/fixtures/ReshapeLayerFixture.h | 53
-rw-r--r--  tests/validation/fixtures/ReverseFixture.h | 64
-rw-r--r--  tests/validation/fixtures/ScaleFixture.h | 148
-rw-r--r--  tests/validation/fixtures/ScatterLayerFixture.h | 254
-rw-r--r--  tests/validation/fixtures/ScharrFixture.h | 15
-rw-r--r--  tests/validation/fixtures/SelectFixture.h | 19
-rw-r--r--  tests/validation/fixtures/SliceOperationsFixtures.h | 21
-rw-r--r--  tests/validation/fixtures/SobelFixture.h | 192
-rw-r--r--  tests/validation/fixtures/SoftmaxLayerFixture.h | 41
-rw-r--r--  tests/validation/fixtures/SpaceToBatchFixture.h | 21
-rw-r--r--  tests/validation/fixtures/SpaceToDepthFixture.h | 23
-rw-r--r--  tests/validation/fixtures/SplitFixture.h | 13
-rw-r--r--  tests/validation/fixtures/StackLayerFixture.h | 45
-rw-r--r--  tests/validation/fixtures/TableLookupFixture.h | 122
-rw-r--r--  tests/validation/fixtures/ThresholdFixture.h | 107
-rw-r--r--  tests/validation/fixtures/TileFixture.h | 11
-rw-r--r--  tests/validation/fixtures/TransposeFixture.h | 21
-rw-r--r--  tests/validation/fixtures/UNIT/ContextFixture.h | 148
-rw-r--r--  tests/validation/fixtures/UNIT/DynamicTensorFixture.h | 35
-rw-r--r--  tests/validation/fixtures/UNIT/MemoryManagerFixture.h | 14
-rw-r--r--  tests/validation/fixtures/UNIT/QueueFixture.h | 144
-rw-r--r--  tests/validation/fixtures/UNIT/TensorFixture.h | 424
-rw-r--r--  tests/validation/fixtures/UNIT/TensorPackFixture.h | 184
-rw-r--r--  tests/validation/fixtures/UNIT/WeightsRetentionFixture.h | 27
-rw-r--r--  tests/validation/fixtures/UnstackFixture.h | 7
-rw-r--r--  tests/validation/fixtures/UpsampleLayerFixture.h | 148
-rw-r--r--  tests/validation/fixtures/WarpAffineFixture.h | 121
-rw-r--r--  tests/validation/fixtures/WarpPerspectiveFixture.h | 134
-rw-r--r--  tests/validation/fixtures/WeightsReshapeFixture.h | 31
-rw-r--r--  tests/validation/fixtures/WinogradConvolutionLayerFixture.h | 331
-rw-r--r--  tests/validation/fixtures/YOLOLayerFixture.h | 162
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h | 246
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h | 411
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h | 273
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h | 297
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h | 188
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h | 207
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h | 186
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h | 171
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h | 239
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h | 137
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h | 272
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h | 158
154 files changed, 11881 insertions(+), 8575 deletions(-)
diff --git a/tests/validation/fixtures/AbsoluteDifferenceFixture.h b/tests/validation/fixtures/AbsoluteDifferenceFixture.h
deleted file mode 100644
index b725304a4d..0000000000
--- a/tests/validation/fixtures/AbsoluteDifferenceFixture.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_ABSOLUTE_DIFFERENCE_FIXTURE
-#define ARM_COMPUTE_TEST_ABSOLUTE_DIFFERENCE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/AbsoluteDifference.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class AbsoluteDifferenceValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type)
- {
- _target = compute_target(shape, data_type0, data_type1, output_data_type);
- _reference = compute_reference(shape, data_type0, data_type1, output_data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
- {
- // Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1);
- TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type, 1);
-
- // Create and configure function
- FunctionType abs_diff;
- abs_diff.configure(&ref_src1, &ref_src2, &dst);
-
- ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- ref_src1.allocator()->allocate();
- ref_src2.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(ref_src1), 0);
- fill(AccessorType(ref_src2), 1);
-
- // Compute function
- abs_diff.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
- {
- // Create reference
- SimpleTensor<T> ref_src1{ shape, data_type0, 1 };
- SimpleTensor<T> ref_src2{ shape, data_type1, 1 };
-
- // Fill reference
- fill(ref_src1, 0);
- fill(ref_src2, 1);
-
- return reference::absolute_difference<T>(ref_src1, ref_src2, output_data_type);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ABSOLUTE_DIFFERENCE_FIXTURE */
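Every fixture in this directory, including the deleted one above, follows the same shape: setup() computes a device-side target and a host-side reference for the same inputs, and the enclosing test case compares the two. A minimal standalone sketch of that target-versus-reference pattern (plain C++, no ACL types; absdiff_target and absdiff_reference are illustrative stand-ins, not library functions):

// Standalone sketch (not ACL code) of the target-versus-reference pattern:
// run the implementation under test and a trivially correct reference on
// the same inputs, then compare element by element.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>

// Stand-in for the optimized function under test.
std::vector<uint8_t> absdiff_target(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b)
{
    std::vector<uint8_t> out(a.size());
    for(std::size_t i = 0; i < a.size(); ++i)
    {
        out[i] = static_cast<uint8_t>(a[i] > b[i] ? a[i] - b[i] : b[i] - a[i]);
    }
    return out;
}

// Stand-in for the scalar reference implementation.
std::vector<uint8_t> absdiff_reference(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b)
{
    std::vector<uint8_t> out(a.size());
    for(std::size_t i = 0; i < a.size(); ++i)
    {
        out[i] = static_cast<uint8_t>(std::abs(static_cast<int>(a[i]) - static_cast<int>(b[i])));
    }
    return out;
}

int main()
{
    const std::vector<uint8_t> a{ 10, 200, 3 };
    const std::vector<uint8_t> b{ 12, 100, 250 };
    assert(absdiff_target(a, b) == absdiff_reference(a, b)); // validate(target, reference)
    return 0;
}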
diff --git a/tests/validation/fixtures/AccumulateFixture.h b/tests/validation/fixtures/AccumulateFixture.h
deleted file mode 100644
index 091000175c..0000000000
--- a/tests/validation/fixtures/AccumulateFixture.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_ACCUMULATE_FIXTURE
-#define ARM_COMPUTE_TEST_ACCUMULATE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/Accumulate.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class AccumulateBaseValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, DataType output_data_type)
- {
- _target = compute_target(shape, data_type, output_data_type);
- _reference = compute_reference(shape, data_type, output_data_type);
- }
-
-protected:
- template <typename U, typename D>
- void fill(U &&tensor, int i, D max)
- {
- library->fill_tensor_uniform(tensor, i, static_cast<D>(0), max);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, DataType output_data_type)
- {
- // Create tensors
- TensorType ref_src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type);
-
- // Create and configure function
- FunctionType accum;
- accum_conf(accum, ref_src, dst);
-
- ARM_COMPUTE_EXPECT(ref_src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- ref_src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!ref_src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- const T1 max = std::numeric_limits<T1>::max();
-
- // Fill tensors
- fill(AccessorType(ref_src), 0, max);
- fill(AccessorType(dst), 1, static_cast<T2>(max));
-
- // Compute function
- accum.run();
-
- return dst;
- }
-
- SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType data_type, DataType output_data_type)
- {
- // Create reference
- SimpleTensor<T1> ref_src{ shape, data_type };
-
- const T1 max = std::numeric_limits<T1>::max();
-
- // Fill reference
- fill(ref_src, 0, max);
-
- return accum_ref(ref_src, output_data_type);
- }
-
- virtual void accum_conf(FunctionType &func, const TensorType &input, TensorType &accum) = 0;
-
- virtual SimpleTensor<T2> accum_ref(const SimpleTensor<T1> &input, DataType output_data_type) = 0;
-
- TensorType _target{};
- SimpleTensor<T2> _reference{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class AccumulateValidationFixture : public AccumulateBaseValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, DataType output_data_type)
- {
- AccumulateBaseValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, data_type, output_data_type);
- }
-
- virtual void accum_conf(FunctionType &func, const TensorType &input, TensorType &accum) override
- {
- func.configure(&input, &accum);
- }
-
- virtual SimpleTensor<T2> accum_ref(const SimpleTensor<T1> &input, DataType output_data_type) override
- {
- return reference::accumulate<T1, T2>(input, output_data_type);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class AccumulateWeightedValidationFixture : public AccumulateBaseValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, DataType output_data_type)
- {
- std::mt19937 gen(library->seed());
- std::uniform_real_distribution<> float_dist(0, 1);
-
- _alpha = float_dist(gen);
-
- AccumulateBaseValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, data_type, output_data_type);
- }
-
- virtual void accum_conf(FunctionType &func, const TensorType &input, TensorType &accum) override
- {
- func.configure(&input, _alpha, &accum);
- }
-
- virtual SimpleTensor<T2> accum_ref(const SimpleTensor<T1> &input, DataType output_data_type) override
- {
- return reference::accumulate_weighted<T1, T2>(input, _alpha, output_data_type);
- }
-
- float _alpha{ 0.f };
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class AccumulateSquaredValidationFixture : public AccumulateBaseValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, DataType output_data_type)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint32_t> int_dist(0, 15);
-
- _shift = int_dist(gen);
-
- AccumulateBaseValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, data_type, output_data_type);
- }
-
- virtual void accum_conf(FunctionType &func, const TensorType &input, TensorType &accum) override
- {
- func.configure(&input, _shift, &accum);
- }
-
- virtual SimpleTensor<T2> accum_ref(const SimpleTensor<T1> &input, DataType output_data_type) override
- {
- return reference::accumulate_squared<T1, T2>(input, _shift, output_data_type);
- }
-
- uint32_t _shift{ 0U };
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ACCUMULATE_FIXTURE */
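The deleted base fixture above routes the variant-specific steps through the pure virtual accum_conf() and accum_ref() hooks, so the plain, weighted, and squared variants override only configuration and reference math while the allocate/fill/run skeleton lives in one place. A standalone sketch of that template-method layout (illustrative names only, no ACL types):

// Standalone sketch of the template-method pattern: the base class owns the
// test skeleton, subclasses override only the hooks that differ per variant.
#include <cstddef>
#include <iostream>
#include <vector>

class AccumulateTestBase
{
public:
    virtual ~AccumulateTestBase() = default;

    void run_test(const std::vector<int> &input)
    {
        std::vector<int> acc(input.size(), 1);        // accumulator pre-filled, as in the fixture
        accumulate(input, acc);                       // hook: variant-specific target computation
        const std::vector<int> ref = reference(input); // hook: variant-specific reference
        std::cout << (acc == ref ? "match" : "MISMATCH") << std::endl;
    }

protected:
    virtual void accumulate(const std::vector<int> &in, std::vector<int> &acc) = 0;
    virtual std::vector<int> reference(const std::vector<int> &in) = 0;
};

class PlainAccumulateTest : public AccumulateTestBase
{
protected:
    void accumulate(const std::vector<int> &in, std::vector<int> &acc) override
    {
        for(std::size_t i = 0; i < in.size(); ++i)
        {
            acc[i] += in[i];
        }
    }

    std::vector<int> reference(const std::vector<int> &in) override
    {
        std::vector<int> ref(in.size());
        for(std::size_t i = 0; i < in.size(); ++i)
        {
            ref[i] = 1 + in[i]; // accumulator started at 1
        }
        return ref;
    }
};

int main()
{
    PlainAccumulateTest test;
    test.run_test({ 1, 2, 3 });
    return 0;
}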
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 551ee2d8cf..a24ba8913e 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ACTIVATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ACTIVATIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -47,12 +47,7 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ActivationValidationGenericFixture : public framework::Fixture
{
public:
- ActivationValidationGenericFixture()
- : _target(parameters->get_ctx<TensorType>())
- {
- }
- template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
ActivationLayerInfo info(function, alpha_beta, alpha_beta);
@@ -120,29 +115,28 @@ protected:
TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info)
{
- auto ctx = parameters->get_ctx<TensorType>();
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW, ctx);
- TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW, ctx);
+ TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW);
+ TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW);
// Create and configure function
- FunctionType act_layer(ctx);
+ FunctionType act_layer;
TensorType *dst_ptr = _in_place ? nullptr : &dst;
act_layer.configure(&src, dst_ptr, info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
if(!_in_place)
{
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
}
// Fill tensors
@@ -234,7 +228,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ActivationValidationFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
{
ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
@@ -245,7 +238,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ActivationValidationQuantizedFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info);
@@ -255,4 +247,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ACTIVATIONLAYERFIXTURE_H
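One detail of the updated fixture worth calling out: when _in_place is true it passes a null destination pointer to configure() and never allocates dst, relying on the operator writing its result back into src. A standalone sketch of that nullptr-means-in-place convention (relu() here is an illustrative stand-in, not an ACL API):

// Standalone sketch: a null destination selects in-place execution, so the
// caller only allocates a destination buffer when running out-of-place.
#include <cassert>
#include <cstddef>
#include <vector>

void relu(std::vector<float> *src, std::vector<float> *dst)
{
    std::vector<float> *out = (dst != nullptr) ? dst : src; // nullptr selects in-place
    for(std::size_t i = 0; i < src->size(); ++i)
    {
        (*out)[i] = ((*src)[i] > 0.f) ? (*src)[i] : 0.f;
    }
}

int main()
{
    std::vector<float> src{ -1.f, 2.f };
    relu(&src, nullptr); // in-place: no destination buffer was allocated
    assert(src[0] == 0.f && src[1] == 2.f);
    return 0;
}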
diff --git a/tests/validation/fixtures/AddMulAddFixture.h b/tests/validation/fixtures/AddMulAddFixture.h
new file mode 100644
index 0000000000..d13fef2f02
--- /dev/null
+++ b/tests/validation/fixtures/AddMulAddFixture.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ArithmeticOperations.h"
+#include "tests/validation/reference/DequantizationLayer.h"
+#include "tests/validation/reference/PixelWiseMultiplication.h"
+#include "tests/validation/reference/QuantizationLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AddMulAddGenericFixture : public framework::Fixture
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out)
+ {
+ compute_target(shape, data_type, act_info, interm_out);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, DataType data_type)
+ {
+ switch(data_type)
+ {
+ case DataType::F32:
+ library->fill_tensor_uniform(tensor, i, -10.f, 10.f);
+ break;
+ case DataType::F16:
+ library->fill_tensor_uniform(tensor, i, -1.f, 1.f);
+ break;
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+ }
+ }
+
+ void compute_target(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out)
+ {
+ TensorShape b_shape(shape.x());
+
+ // Create tensors
+ TensorType input1 = create_tensor<TensorType>(shape, data_type, 1, _input1_qinfo);
+ TensorType input2 = create_tensor<TensorType>(shape, data_type, 1, _input2_qinfo);
+ TensorType bn_mul = create_tensor<TensorType>(b_shape, data_type, 1, _bn_mul_qinfo);
+ TensorType bn_add = create_tensor<TensorType>(b_shape, data_type, 1, _bn_add_qinfo);
+ TensorType add_output = create_tensor<TensorType>(shape, data_type, 1, _add_output_qinfo);
+ TensorType final_output = create_tensor<TensorType>(shape, data_type, 1, _final_output_qinfo);
+
+ // Create and configure function
+ FunctionType add_mul_add;
+ ARM_COMPUTE_ERROR_THROW_ON(add_mul_add.validate(input1.info(), input2.info(), bn_mul.info(),
+ bn_add.info(), interm_out ? add_output.info() : nullptr, final_output.info(),
+ ConvertPolicy::SATURATE, act_info));
+
+ add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add, interm_out ? &add_output : nullptr,
+ &final_output, ConvertPolicy::SATURATE, act_info);
+
+ // Allocate tensors
+ input1.allocator()->allocate();
+ input2.allocator()->allocate();
+ bn_mul.allocator()->allocate();
+ bn_add.allocator()->allocate();
+
+ if(interm_out)
+ {
+ add_output.allocator()->allocate();
+ }
+
+ final_output.allocator()->allocate();
+
+ // Fill tensors
+ fill(AccessorType(input1), 0, data_type);
+ fill(AccessorType(input2), 1, data_type);
+ fill(AccessorType(bn_mul), 2, data_type);
+ fill(AccessorType(bn_add), 3, data_type);
+
+ // Compute function
+ add_mul_add.run();
+
+ _target = std::move(final_output);
+
+ if(interm_out)
+ {
+ _interm_target = std::move(add_output);
+ }
+ }
+
+ TensorType _target{};
+ TensorType _interm_target{};
+ SimpleTensor<T> _reference{};
+ SimpleTensor<T> _interm_reference{};
+
+ QuantizationInfo _input1_qinfo{};
+ QuantizationInfo _input2_qinfo{};
+ QuantizationInfo _bn_mul_qinfo{};
+ QuantizationInfo _bn_add_qinfo{};
+ QuantizationInfo _add_output_qinfo{};
+ QuantizationInfo _final_output_qinfo{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out>
+class AddMulAddFloatValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>;
+
+ void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info)
+ {
+ Parent::setup(shape, data_type, act_info, interm_out);
+ compute_reference(shape, data_type, act_info);
+ }
+
+ // compute_reference() is moved out of the generic fixture because, with quantized data types,
+ // it becomes a very different implementation whose intermediate tensors are always float.
+ // This keeps the reference calculations readable and the classes smaller, since the
+ // fill() and target() methods are not repeated.
+ void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info)
+ {
+ TensorShape b_shape(shape.x());
+
+ // Create reference
+ SimpleTensor<T> input1{ shape, data_type };
+ SimpleTensor<T> input2{ shape, data_type };
+ SimpleTensor<T> bn_mul{ b_shape, data_type };
+ SimpleTensor<T> bn_add{ b_shape, data_type };
+ SimpleTensor<T> add_output{ shape, data_type, 1 };
+
+ SimpleTensor<T> bn_mul_out{ shape, data_type };
+ SimpleTensor<T> bn_add_out{ shape, data_type };
+
+ // Fill reference
+ Parent::fill(input1, 0, data_type);
+ Parent::fill(input2, 1, data_type);
+ Parent::fill(bn_mul, 2, data_type);
+ Parent::fill(bn_add, 3, data_type);
+
+ reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, input1, input2, add_output, ConvertPolicy::SATURATE);
+ bn_mul_out = reference::pixel_wise_multiplication<T, T, T>(add_output, bn_mul, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, data_type);
+ reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, bn_mul_out, bn_add, bn_add_out, ConvertPolicy::SATURATE);
+
+ if(interm_out)
+ {
+ Parent::_interm_reference = std::move(add_output);
+ }
+
+ if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY)
+ {
+ Parent::_reference = reference::activation_layer(bn_add_out, act_info);
+ }
+ else
+ {
+ Parent::_reference = std::move(bn_add_out);
+ }
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out>
+class AddMulAddQuantizedValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>;
+
+ void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info,
+ QuantizationInfo input1_qinfo, QuantizationInfo input2_qinfo, QuantizationInfo bn_mul_qinfo,
+ QuantizationInfo bn_add_qinfo, QuantizationInfo add_output_qinfo, QuantizationInfo final_output_qinfo)
+ {
+ // Quantization arguments moved to class attributes to prevent long function declarations
+ Parent::_input1_qinfo = input1_qinfo;
+ Parent::_input2_qinfo = input2_qinfo;
+ Parent::_bn_mul_qinfo = bn_mul_qinfo;
+ Parent::_bn_add_qinfo = bn_add_qinfo;
+ Parent::_add_output_qinfo = add_output_qinfo;
+ Parent::_final_output_qinfo = final_output_qinfo;
+
+ Parent::setup(shape, data_type, act_info, interm_out);
+ compute_reference(shape, data_type, act_info);
+ }
+
+ // compute_reference() is moved out of the generic fixture because, with quantized data types,
+ // it becomes a very different implementation whose intermediate tensors are always float.
+ // This keeps the reference calculations readable and the classes smaller, since the
+ // fill() and target() methods are not repeated.
+ void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info)
+ {
+ TensorShape b_shape(shape.x());
+
+ // Create reference
+ SimpleTensor<T> input1{ shape, data_type, 1, Parent::_input1_qinfo };
+ SimpleTensor<T> input2{ shape, data_type, 1, Parent::_input2_qinfo };
+ SimpleTensor<T> bn_mul{ b_shape, data_type, 1, Parent::_bn_mul_qinfo };
+ SimpleTensor<T> bn_add{ b_shape, data_type, 1, Parent::_bn_add_qinfo };
+
+ // Fill input tensors
+ Parent::fill(input1, 0, data_type);
+ Parent::fill(input2, 1, data_type);
+ Parent::fill(bn_mul, 2, data_type);
+ Parent::fill(bn_add, 3, data_type);
+
+ SimpleTensor<float> input1_dequantized = reference::dequantization_layer<float>(input1);
+ SimpleTensor<float> input2_dequantized = reference::dequantization_layer<float>(input2);
+ SimpleTensor<float> bn_mul_dequantized = reference::dequantization_layer<float>(bn_mul);
+ SimpleTensor<float> bn_add_dequantized = reference::dequantization_layer<float>(bn_add);
+
+ SimpleTensor<float> add_output_dequantized{ shape, DataType::F32 };
+ SimpleTensor<float> bn_add_out_dequantized{ shape, DataType::F32 };
+
+ reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, input1_dequantized, input2_dequantized, add_output_dequantized, ConvertPolicy::SATURATE);
+ SimpleTensor<float> bn_mul_out_dequantized = reference::pixel_wise_multiplication<float, float, float>(add_output_dequantized, bn_mul_dequantized, 1.f, ConvertPolicy::SATURATE,
+ RoundingPolicy::TO_NEAREST_UP, DataType::F32);
+ reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, bn_mul_out_dequantized, bn_add_dequantized, bn_add_out_dequantized, ConvertPolicy::SATURATE);
+
+ if(interm_out)
+ {
+ Parent::_interm_reference = reference::quantization_layer<float, T>(add_output_dequantized, data_type, Parent::_add_output_qinfo);
+ }
+
+ if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY)
+ {
+ SimpleTensor<T> ref = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo);
+ Parent::_reference = reference::activation_layer(ref, act_info);
+ }
+ else
+ {
+ Parent::_reference = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo);
+ }
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H
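The quantized fixture's reference path above is worth restating in numbers: each quantized input is dequantized to float, the add-multiply-add chain runs entirely in float, and only the final (and optionally intermediate) result is requantized with the output quantization info. A standalone sketch with made-up scale/offset values (arbitrary illustrations, not taken from the tests):

// Standalone sketch: dequantize -> float add * bn_mul + bn_add -> requantize.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

float dequantize(uint8_t q, float scale, int offset)
{
    return (static_cast<int>(q) - offset) * scale;
}

uint8_t quantize(float v, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(v / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q))); // like ConvertPolicy::SATURATE
}

int main()
{
    const float in_scale = 0.1f, out_scale = 0.05f;
    const int   in_offset = 128, out_offset = 128;

    const float x1 = dequantize(140, in_scale, in_offset); //  1.2f
    const float x2 = dequantize(120, in_scale, in_offset); // -0.8f
    const float y  = (x1 + x2) * 2.0f + 0.5f;              // (add) * bn_mul + bn_add, in float

    std::cout << static_cast<int>(quantize(y, out_scale, out_offset)) << std::endl; // prints 154
    return 0;
}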
diff --git a/tests/validation/fixtures/ArgMinMaxFixture.h b/tests/validation/fixtures/ArgMinMaxFixture.h
index 2932ba4508..7a823568a8 100644
--- a/tests/validation/fixtures/ArgMinMaxFixture.h
+++ b/tests/validation/fixtures/ArgMinMaxFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,15 +42,14 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class ArgMinMaxValidationBaseFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo q_info)
+ void setup(TensorShape shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo q_info)
{
- _target = compute_target(shape, data_type, axis, op, q_info);
- _reference = compute_reference(shape, data_type, axis, op, q_info);
+ _target = compute_target(shape, input_type, output_type, axis, op, q_info);
+ _reference = compute_reference(shape, input_type, output_type, axis, op, q_info);
}
protected:
@@ -59,10 +58,15 @@ protected:
{
switch(tensor.data_type())
{
- case DataType::F32:
case DataType::F16:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, 0);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, 0);
break;
}
@@ -75,7 +79,7 @@ protected:
case DataType::QASYMM8:
{
std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, 0);
break;
@@ -83,7 +87,7 @@ protected:
case DataType::QASYMM8_SIGNED:
{
std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, 0);
break;
@@ -93,25 +97,25 @@ protected:
}
}
- TensorType compute_target(TensorShape &src_shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo q_info)
+ TensorType compute_target(TensorShape &src_shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo q_info)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(src_shape, data_type, 1, q_info);
- TensorType dst;
+ TensorType src = create_tensor<TensorType>(src_shape, input_type, 1, q_info);
+ TensorType dst = create_tensor<TensorType>(compute_output_shape(src_shape, axis), output_type, 1, q_info);
// Create and configure function
FunctionType arg_min_max_layer;
arg_min_max_layer.configure(&src, axis, &dst, op);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -122,41 +126,43 @@ protected:
return dst;
}
- SimpleTensor<int32_t> compute_reference(TensorShape &src_shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo q_info)
+ TensorShape compute_output_shape(const TensorShape &src_shape, int axis)
+ {
+ return arm_compute::misc::shape_calculator::compute_reduced_shape(src_shape, axis, false);
+ }
+
+ SimpleTensor<T2> compute_reference(TensorShape &src_shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo q_info)
{
// Create reference
- SimpleTensor<T> src{ src_shape, data_type, 1, q_info };
+ SimpleTensor<T1> src{ src_shape, input_type, 1, q_info };
// Fill reference
fill(src);
- TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(src_shape, axis, false);
- return reference::reduction_operation<T, int32_t>(src, output_shape, axis, op);
+ return reference::reduction_operation<T1, T2>(src, compute_output_shape(src_shape, axis), axis, op, output_type);
}
- TensorType _target{};
- SimpleTensor<int32_t> _reference{};
+ TensorType _target{};
+ SimpleTensor<T2> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ArgMinMaxValidationQuantizedFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class ArgMinMaxValidationQuantizedFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo quantization_info)
+ void setup(const TensorShape &shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo quantization_info)
{
- ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, quantization_info);
+ ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, input_type, output_type, axis, op, quantization_info);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ArgMinMaxValidationFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class ArgMinMaxValidationFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type, int axis, ReductionOperation op)
+ void setup(const TensorShape &shape, DataType input_type, DataType output_type, int axis, ReductionOperation op)
{
- ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, QuantizationInfo());
+ ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, input_type, output_type, axis, op, QuantizationInfo());
}
};
} // namespace validation
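The new compute_output_shape() helper above delegates to compute_reduced_shape() with keep_dims set to false, i.e. an argmin/argmax over one axis simply drops that axis from the output shape. A standalone sketch of that shape rule (reduced_shape() is an illustrative helper, not the ACL implementation):

// Standalone sketch: with keep_dims == false, reducing over an axis drops it.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<std::size_t> reduced_shape(std::vector<std::size_t> shape, std::size_t axis)
{
    shape.erase(shape.begin() + static_cast<std::ptrdiff_t>(axis)); // drop the reduced dimension
    return shape;
}

int main()
{
    // Argmax over axis 1 of a 2x5x3 tensor yields a 2x3 tensor of indices.
    assert((reduced_shape({ 2, 5, 3 }, 1) == std::vector<std::size_t>{ 2, 3 }));
    return 0;
}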
diff --git a/tests/validation/fixtures/ArithmeticDivisionFixture.h b/tests/validation/fixtures/ArithmeticDivisionFixture.h
index cf9db9505b..e11a386130 100644
--- a/tests/validation/fixtures/ArithmeticDivisionFixture.h
+++ b/tests/validation/fixtures/ArithmeticDivisionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticDivisionBroadcastValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type)
{
_target = compute_target(shape0, shape1, data_type);
@@ -55,7 +54,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(1.0f, 5.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(1.0f), T(5.0f) };
library->fill(tensor, distribution, i);
}
@@ -70,18 +72,18 @@ protected:
FunctionType add;
add.configure(&ref_src1, &ref_src2, &dst);
- ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -114,7 +116,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticDivisionValidationFixture : public ArithmeticDivisionBroadcastValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type)
{
ArithmeticDivisionBroadcastValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type);
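The fill() rewrite above relies on std::conditional to pick a distribution type at compile time, because std::uniform_real_distribution is not defined for 16-bit float types, so half needs ACL's uniform_real_distribution_16bit instead. A standalone sketch of the same idiom, with half_t and FakeHalfDistribution as stand-ins for the ACL types:

// Standalone sketch: compile-time selection of a distribution type per T.
#include <iostream>
#include <random>
#include <type_traits>

struct half_t // stand-in for a 16-bit float type
{
    float v;
};

struct FakeHalfDistribution // stand-in for the 16-bit-safe distribution
{
    FakeHalfDistribution(float lo, float hi) : dist(lo, hi) {}
    template <typename G>
    half_t operator()(G &g)
    {
        return half_t{ dist(g) };
    }
    std::uniform_real_distribution<float> dist;
};

template <typename T>
using DistributionType = typename std::conditional<std::is_same<T, half_t>::value,
                                                   FakeHalfDistribution,
                                                   std::uniform_real_distribution<T>>::type;

int main()
{
    std::mt19937 gen(42);
    DistributionType<float>  dist_f{ 1.0f, 5.0f }; // std::uniform_real_distribution<float>
    DistributionType<half_t> dist_h{ 1.0f, 5.0f }; // FakeHalfDistribution
    std::cout << dist_f(gen) << " " << dist_h(gen).v << std::endl;
    return 0;
}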
diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h
index 4a6b0bd3f3..0785af1151 100644
--- a/tests/validation/fixtures/ArithmeticOperationsFixture.h
+++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,15 +45,14 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticOperationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1,
- DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
+ void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy,
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool is_inplace)
{
- _op = op;
- _act_info = act_info;
- _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
- _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ _op = op;
+ _act_info = act_info;
+ _is_inplace = is_inplace;
+ _target = compute_target(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ _reference = compute_reference(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
}
protected:
@@ -63,30 +62,55 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+ TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
- TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
- TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out);
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type, 1, qinfo0);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, data_type, 1, qinfo_out);
+
+ // Check whether to do in-place computation and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if(_is_inplace)
+ {
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if(src1_is_inplace)
+ {
+ actual_dst = &ref_src1;
+ }
+ else
+ {
+ actual_dst = &ref_src2;
+ }
+ }
// Create and configure function
FunctionType arith_op;
- arith_op.configure(&ref_src1, &ref_src2, &dst, convert_policy, _act_info);
+ arith_op.configure(&ref_src1, &ref_src2, actual_dst, convert_policy, _act_info);
- ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
+
+ // If not computing in-place, the original dst still needs to be allocated
+ if(!_is_inplace)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -95,17 +119,16 @@ protected:
// Compute function
arith_op.run();
- return dst;
+ return std::move(*actual_dst);
}
- SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1,
- DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
// Create reference
- SimpleTensor<T> ref_src1{ shape0, data_type0, 1, qinfo0 };
- SimpleTensor<T> ref_src2{ shape1, data_type1, 1, qinfo1 };
- SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out };
+ SimpleTensor<T> ref_src1{ shape0, data_type, 1, qinfo0 };
+ SimpleTensor<T> ref_src2{ shape1, data_type, 1, qinfo1 };
+ SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), data_type, 1, qinfo_out };
// Fill reference
fill(ref_src1, 0);
@@ -119,17 +142,17 @@ protected:
SimpleTensor<T> _reference{};
reference::ArithmeticOperation _op{ reference::ArithmeticOperation::ADD };
ActivationLayerInfo _act_info{};
+ bool _is_inplace{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ArithmeticAdditionBroadcastValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -137,11 +160,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticAdditionValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -149,11 +171,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticAdditionBroadcastValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -161,11 +182,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticAdditionValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -173,13 +193,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticAdditionValidationQuantizedFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticAdditionValidationQuantizedBroadcastFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out,
+ bool is_inplace)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
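
The quantized fixtures above thread three QuantizationInfo objects (two inputs, one output) through to the generic fixture: conceptually, the reference dequantizes both operands, adds or subtracts in float, and requantizes with the output parameters. A minimal sketch of uniform affine (de)quantization as used for QASYMM8-style types; this is illustrative background, not the library's implementation:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // q = round(x / scale) + offset  <=>  x = scale * (q - offset)
    inline float dequantize(uint8_t q, float scale, int offset)
    {
        return scale * (static_cast<int>(q) - offset);
    }

    inline uint8_t quantize(float x, float scale, int offset)
    {
        const int q = static_cast<int>(std::lround(x / scale)) + offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, q))); // saturate to [0, 255]
    }
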
@@ -187,12 +217,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticSubtractionBroadcastValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1,
- data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -200,12 +228,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticSubtractionBroadcastValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info,
+ bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1,
- data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -213,12 +240,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticSubtractionValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
- data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -226,12 +251,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticSubtractionValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
- data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -239,14 +262,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticSubtractionValidationQuantizedFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticSubtractionValidationQuantizedBroadcastFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out,
+ bool is_inplace)
{
- ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
- data_type0, data_type1, output_data_type,
- convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
} // namespace validation
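
Across these fixtures the old (data_type0, data_type1, output_data_type) triple collapses into a single data_type, and a new is_inplace flag is forwarded to the generic fixture. A minimal sketch of how an in-place run is typically wired up, with hypothetical names (the generic fixture's actual plumbing is defined elsewhere in this file):

    // Illustrative only: when running in place, the first input doubles as the output.
    template <typename TensorType>
    TensorType *select_output(TensorType &src0, TensorType &dst, bool is_inplace)
    {
        return is_inplace ? &src0 : &dst;
    }

    // Usage sketch: func.configure(&src0, &src1, select_output(src0, dst, is_inplace), policy);
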
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index 359752f14e..54a0ed9e09 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BatchNormalizationLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
_data_type = dt;
@@ -59,10 +58,14 @@ protected:
template <typename U>
void fill(U &&src_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
{
- const float min_bound = -1.f;
- const float max_bound = 1.f;
- std::uniform_real_distribution<> distribution(min_bound, max_bound);
- std::uniform_real_distribution<> distribution_var(0, max_bound);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ const T min_bound = T(-1.f);
+ const T max_bound = T(1.f);
+ DistributionType distribution{ min_bound, max_bound };
+ DistributionType distribution_var{ T(0.f), max_bound };
+
library->fill(src_tensor, distribution, 0);
library->fill(mean_tensor, distribution, 1);
library->fill(var_tensor, distribution_var, 0);
@@ -73,7 +76,7 @@ protected:
else
{
// Fill with default value 0.f
- library->fill_tensor_value(beta_tensor, 0.f);
+ library->fill_tensor_value(beta_tensor, T(0.f));
}
if(_use_gamma)
{
@@ -82,7 +85,7 @@ protected:
else
{
// Fill with default value 1.f
- library->fill_tensor_value(gamma_tensor, 1.f);
+ library->fill_tensor_value(gamma_tensor, T(1.f));
}
}
@@ -107,12 +110,12 @@ protected:
TensorType *gamma_ptr = _use_gamma ? &gamma : nullptr;
norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon, act_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(gamma.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -122,12 +125,12 @@ protected:
beta.allocator()->allocate();
gamma.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!gamma.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), AccessorType(mean), AccessorType(var), AccessorType(beta), AccessorType(gamma));
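
The fill helper now dispatches on T at compile time because std::uniform_real_distribution is only specified for float, double and long double; half-precision tensors therefore go through a 16-bit-aware distribution. A standalone sketch of the same std::conditional dispatch, with stand-ins for the library's types (half16 and distribution_16bit are placeholders, not real library names):

    #include <random>
    #include <type_traits>

    struct half16 {};                                    // stand-in for arm_compute::half
    template <typename T> struct distribution_16bit {};  // stand-in for uniform_real_distribution_16bit<T>

    template <typename T>
    using dist_t = typename std::conditional<std::is_same<T, half16>::value,
                                             distribution_16bit<T>,
                                             std::uniform_real_distribution<T>>::type;

    static_assert(std::is_same<dist_t<float>, std::uniform_real_distribution<float>>::value, "float uses std");
    static_assert(std::is_same<dist_t<half16>, distribution_16bit<half16>>::value, "half uses 16-bit path");
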
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h
index 39c7d46114..161eeb0ef4 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename ConvolutionFuncti
class BatchNormalizationLayerFusionValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation,
bool use_conv_b, bool use_beta, bool use_gamma, float epsilon, DataType dt, DataLayout data_layout)
{
@@ -65,16 +64,19 @@ protected:
template <typename U>
void fill(U &&src, U &&w_tensor, U &&b_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
- std::uniform_real_distribution<> distribution_gz(0, 1.f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.f), T(1.f) };
+ DistributionType distribution_gz{ T(0.f), T(1.f) };
library->fill(src, distribution, 0);
library->fill(w_tensor, distribution, 1);
library->fill(mean_tensor, distribution, 2);
library->fill(var_tensor, distribution_gz, 3);
- _use_conv_b ? library->fill(b_tensor, distribution, 4) : library->fill_tensor_value(b_tensor, 0.f);
- _use_beta ? library->fill(beta_tensor, distribution, 5) : library->fill_tensor_value(beta_tensor, 0.f);
- _use_gamma ? library->fill(gamma_tensor, distribution, 6) : library->fill_tensor_value(gamma_tensor, 1.f);
+ _use_conv_b ? library->fill(b_tensor, distribution, 4) : library->fill_tensor_value(b_tensor, T(0.f));
+ _use_beta ? library->fill(beta_tensor, distribution, 5) : library->fill_tensor_value(beta_tensor, T(0.f));
+ _use_gamma ? library->fill(gamma_tensor, distribution, 6) : library->fill_tensor_value(gamma_tensor, T(1.f));
}
TensorType compute_target(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, float epsilon)
@@ -107,16 +109,16 @@ protected:
fuse_fn.configure(&conv_w, &bn_mean, &bn_var, &fused_w, &fused_b, conv_b_ptr, beta_ptr, gamma_ptr, epsilon);
conv_fn.configure(&src, &fused_w, &fused_b, &dst, info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(conv_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(conv_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(fused_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(fused_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(conv_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(conv_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_gamma.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(fused_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(fused_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -130,16 +132,16 @@ protected:
fused_b.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!conv_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!conv_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!fused_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!fused_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!conv_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!conv_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_gamma.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!fused_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!fused_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src),
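
The EXPECT-to-ASSERT migration in these hunks replaces a non-fatal, logged check with a fatal one, so a tensor left in the wrong state stops the test before it can corrupt later steps. A generic sketch of that distinction (these macros are illustrative, not the framework's actual definitions):

    #include <cstdio>
    #include <stdexcept>

    // EXPECT-style: record the failure, keep running.
    #define CHECK_EXPECT(cond) \
        do { if(!(cond)) { std::fprintf(stderr, "check failed: %s\n", #cond); } } while(0)

    // ASSERT-style: abandon the test immediately.
    #define CHECK_ASSERT(cond) \
        do { if(!(cond)) { throw std::runtime_error("assert failed: " #cond); } } while(0)
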
diff --git a/tests/validation/fixtures/BatchToSpaceLayerFixture.h b/tests/validation/fixtures/BatchToSpaceLayerFixture.h
index 973f2ed27b..56a6109dbc 100644
--- a/tests/validation/fixtures/BatchToSpaceLayerFixture.h
+++ b/tests/validation/fixtures/BatchToSpaceLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#ifndef ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE
+#include "arm_compute/core/Helpers.h"
#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
@@ -39,23 +40,26 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BatchToSpaceLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
+ void setup(const TensorShape &input_shape, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &output_shape, DataType data_type, DataLayout data_layout)
{
- _target = compute_target(input_shape, block_shape_shape, output_shape, data_type, data_layout);
- _reference = compute_reference(input_shape, block_shape_shape, output_shape, data_type);
+ _target = compute_target(input_shape, block_shape, crop_info, output_shape, data_type, data_layout);
+ _reference = compute_reference(input_shape, block_shape, crop_info, output_shape, data_type);
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
- TensorType compute_target(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape,
+ TensorType compute_target(TensorShape input_shape, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, TensorShape output_shape,
DataType data_type, DataLayout data_layout)
{
+ ARM_COMPUTE_ERROR_ON(block_shape.size() != 2U); // Only a 2D spatial block shape (x, y) is supported for now
if(data_layout == DataLayout::NHWC)
{
permute(input_shape, PermutationVector(2U, 0U, 1U));
@@ -63,64 +67,49 @@ protected:
}
// Create tensors
- TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
- TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
// Create and configure function
FunctionType batch_to_space;
- batch_to_space.configure(&input, &block_shape, &output);
+ batch_to_space.configure(&input, block_shape.at(0), block_shape.at(1), &output, crop_info);
- ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
// Allocate tensors
input.allocator()->allocate();
- block_shape.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
fill(AccessorType(input), 0);
- {
- auto block_shape_data = AccessorType(block_shape);
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
- {
- static_cast<int32_t *>(block_shape_data.data())[i] = output_shape[i + idx_width] / input_shape[i + idx_width];
- }
- }
// Compute function
batch_to_space.run();
return output;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape,
- const TensorShape &output_shape, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const std::vector<int32_t> &block_shape,
+ const CropInfo &crop_info, const TensorShape &output_shape, DataType data_type)
{
+ ARM_COMPUTE_ERROR_ON(block_shape.size() != 2U); // Only a 2D spatial block shape (x, y) is supported for now
// Create reference
- SimpleTensor<T> input{ input_shape, data_type };
- SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 };
+ SimpleTensor<T> input{ input_shape, data_type };
// Fill reference
fill(input, 0);
- for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
- {
- block_shape[i] = output_shape[i] / input_shape[i];
- }
// Compute reference
- return reference::batch_to_space(input, block_shape, output_shape);
+ return reference::batch_to_space(input, block_shape, crop_info, output_shape);
}
TensorType _target{};
SimpleTensor<T> _reference{};
};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
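
With the API change above, the block shape is passed as two static integers plus a CropInfo instead of a runtime S32 tensor, so the fixture no longer derives it from the input/output shapes at fill time. For orientation, the shape arithmetic batch-to-space obeys, sketched for an NCHW layout (the crop field names here are assumed for illustration):

    struct Crop { unsigned left, right, top, bottom; }; // stand-in for CropInfo

    // Batch-to-space: spatial dims grow by the block factors, batch shrinks
    // by the block area, and cropping trims the enlarged spatial dims.
    void batch_to_space_shape(unsigned &n, unsigned &h, unsigned &w,
                              unsigned block_x, unsigned block_y, const Crop &crop)
    {
        w = w * block_x - crop.left - crop.right;
        h = h * block_y - crop.top - crop.bottom;
        n = n / (block_x * block_y);
    }
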
diff --git a/tests/validation/fixtures/BitwiseAndFixture.h b/tests/validation/fixtures/BitwiseAndFixture.h
index c1247de6c4..745a34058e 100644
--- a/tests/validation/fixtures/BitwiseAndFixture.h
+++ b/tests/validation/fixtures/BitwiseAndFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BitwiseAndValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -69,17 +68,17 @@ protected:
bitwise_and.configure(&src1, &src2, &dst);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src1.allocator()->allocate();
src2.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src1), 0);
diff --git a/tests/validation/fixtures/BitwiseNotFixture.h b/tests/validation/fixtures/BitwiseNotFixture.h
index f90d0895aa..bdfd255156 100644
--- a/tests/validation/fixtures/BitwiseNotFixture.h
+++ b/tests/validation/fixtures/BitwiseNotFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BitwiseNotValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -68,14 +67,14 @@ protected:
bitwise_not.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/BitwiseOrFixture.h b/tests/validation/fixtures/BitwiseOrFixture.h
index d6ec5b8065..03560e0171 100644
--- a/tests/validation/fixtures/BitwiseOrFixture.h
+++ b/tests/validation/fixtures/BitwiseOrFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BitwiseOrValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -69,17 +68,17 @@ protected:
bitwise_or.configure(&src1, &src2, &dst);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src1.allocator()->allocate();
src2.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src1), 0);
diff --git a/tests/validation/fixtures/BitwiseXorFixture.h b/tests/validation/fixtures/BitwiseXorFixture.h
index 8da2181fac..4872b231a5 100644
--- a/tests/validation/fixtures/BitwiseXorFixture.h
+++ b/tests/validation/fixtures/BitwiseXorFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BitwiseXorValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -69,17 +68,17 @@ protected:
bitwise_xor.configure(&src1, &src2, &dst);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src1.allocator()->allocate();
src2.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src1), 0);
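
The four bitwise fixtures (AND, NOT, OR, XOR) differ only in the configured function; the compute_target skeleton is otherwise identical. Condensed into one sketch for the binary case, reusing the framework helpers visible in these hunks (so it assumes the test framework's headers and globals; distinct fill seeds keep the two inputs independent):

    template <typename TensorType, typename AccessorType, typename FunctionType>
    TensorType run_bitwise_binary(const TensorShape &shape, DataType data_type)
    {
        TensorType src1 = create_tensor<TensorType>(shape, data_type);
        TensorType src2 = create_tensor<TensorType>(shape, data_type);
        TensorType dst  = create_tensor<TensorType>(shape, data_type);

        FunctionType fn;
        fn.configure(&src1, &src2, &dst);   // validates shapes/types while still resizable

        src1.allocator()->allocate();       // commit backing memory
        src2.allocator()->allocate();
        dst.allocator()->allocate();

        library->fill_tensor_uniform(AccessorType(src1), 0); // seed 0
        library->fill_tensor_uniform(AccessorType(src2), 1); // seed 1: independent data

        fn.run();
        return dst;
    }
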
diff --git a/tests/validation/fixtures/BoundingBoxTransformFixture.h b/tests/validation/fixtures/BoundingBoxTransformFixture.h
index 5e4c598f73..03edaeab16 100644
--- a/tests/validation/fixtures/BoundingBoxTransformFixture.h
+++ b/tests/validation/fixtures/BoundingBoxTransformFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -102,7 +102,6 @@ class BoundingBoxTransformGenericFixture : public framework::Fixture
public:
using TDeltas = typename std::conditional<std::is_same<typename std::decay<T>::type, uint16_t>::value, uint8_t, T>::type;
- template <typename...>
void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type, QuantizationInfo deltas_qinfo)
{
const bool is_qasymm16 = data_type == DataType::QASYMM16;
@@ -157,17 +156,17 @@ protected:
FunctionType bbox_transform;
bbox_transform.configure(&boxes, &pred_boxes, &deltas, bbox_info);
- ARM_COMPUTE_EXPECT(deltas.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(boxes.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(pred_boxes.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(deltas.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(boxes.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(pred_boxes.info()->is_resizable());
// Allocate tensors
deltas.allocator()->allocate();
boxes.allocator()->allocate();
pred_boxes.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!deltas.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!boxes.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!deltas.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!boxes.info()->is_resizable());
// Fill tensors
TensorShape img_shape(bbox_info.scale() * bbox_info.img_width(), bbox_info.scale() * bbox_info.img_height());
@@ -215,7 +214,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BoundingBoxTransformFixture : public BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type)
{
BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(deltas_shape, info, data_type, QuantizationInfo());
@@ -228,7 +226,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class BoundingBoxTransformQuantizedFixture : public BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type, QuantizationInfo deltas_qinfo)
{
BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(deltas_shape, info, data_type, deltas_qinfo);
diff --git a/tests/validation/fixtures/Box3x3Fixture.h b/tests/validation/fixtures/Box3x3Fixture.h
deleted file mode 100644
index e851b3418e..0000000000
--- a/tests/validation/fixtures/Box3x3Fixture.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_BOX3X3_FIXTURE
-#define ARM_COMPUTE_TEST_BOX3X3_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Box3x3.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class Box3x3ValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType box3x3;
- box3x3.configure(&src, &dst, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- box3x3.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::box3x3<T>(src, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_BOX3X3_FIXTURE */
diff --git a/tests/validation/fixtures/CannyEdgeFixture.h b/tests/validation/fixtures/CannyEdgeFixture.h
deleted file mode 100644
index d52b17e550..0000000000
--- a/tests/validation/fixtures/CannyEdgeFixture.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_CANNY_EDGE_FIXTURE
-#define ARM_COMPUTE_TEST_CANNY_EDGE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/CannyEdgeDetector.h"
-
-namespace arm_compute
-{
-class CLCannyEdge;
-class NECannyEdge;
-
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename ArrayType, typename FunctionType, typename T>
-class CannyEdgeValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(std::string image, int gradient_size, MagnitudeType norm_type, BorderMode border_mode, Format format)
- {
- CannyEdgeParameters params = canny_edge_parameters();
-
- _target = compute_target(image, gradient_size, norm_type, border_mode, format, params);
- _reference = compute_reference(image, gradient_size, norm_type, border_mode, format, params);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, RawTensor raw)
- {
- library->fill(tensor, raw);
- }
-
- TensorType compute_target(const std::string &image, int gradient_size, MagnitudeType norm_type, BorderMode border_mode, Format format, const CannyEdgeParameters &params)
- {
- // Load the image (cached by the library if loaded before)
- const RawTensor &raw = library->get(image, format);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(raw.shape(), format);
- TensorType dst = create_tensor<TensorType>(raw.shape(), format);
- src.info()->set_format(format);
- dst.info()->set_format(format);
-
- // Create Canny edge configure function
- FunctionType canny_edge;
- canny_edge.configure(&src, &dst, params.upper_thresh, params.lower_thresh, gradient_size, static_cast<int>(norm_type) + 1, border_mode, params.constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), raw);
-
- // Compute function
- canny_edge.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const std::string &image, int gradient_size, MagnitudeType norm_type, BorderMode border_mode, Format format, const CannyEdgeParameters &params)
- {
- ARM_COMPUTE_ERROR_ON(format != Format::U8);
-
- // Load the image (cached by the library if loaded before)
- const RawTensor &raw = library->get(image, format);
-
- // Create reference
- SimpleTensor<T> src{ raw.shape(), format };
-
- // Fill reference
- fill(src, raw);
-
- return reference::canny_edge_detector<T>(src, params.upper_thresh, params.lower_thresh, gradient_size, norm_type, border_mode, params.constant_border_value);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CANNY_EDGE_FIXTURE */
diff --git a/tests/validation/fixtures/CastFixture.h b/tests/validation/fixtures/CastFixture.h
index 3a6efa22af..e9d624e6f3 100644
--- a/tests/validation/fixtures/CastFixture.h
+++ b/tests/validation/fixtures/CastFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class CastValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
{
_target = compute_target(shape, dt_in, dt_out, policy);
@@ -59,6 +58,7 @@ protected:
{
case DataType::U8:
case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
case DataType::S8:
case DataType::F32:
{
@@ -85,6 +85,16 @@ protected:
library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), static_cast<int32_t>(signed_max));
break;
}
+ case DataType::U64:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<uint64_t>(unsigned_min), static_cast<uint64_t>(unsigned_max));
+ break;
+ }
+ case DataType::S64:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<int64_t>(signed_min), static_cast<int64_t>(signed_max));
+ break;
+ }
default:
ARM_COMPUTE_ERROR("NOT SUPPORTED!");
}
@@ -105,15 +115,15 @@ protected:
FunctionType cast;
cast.configure(&src, &dst, policy);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0, dt_in, dt_out);
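
The new U64/S64 branches follow the same rule as the narrower types: the fill range is clamped so every generated value is representable after the cast. A standalone sketch of that bound computation (illustrative; the fixture computes its actual bounds from dt_in/dt_out elsewhere in this file):

    #include <algorithm>
    #include <limits>
    #include <utility>

    // Pick a fill range for SrcT such that casting to DstT cannot overflow.
    template <typename SrcT, typename DstT>
    std::pair<SrcT, SrcT> safe_fill_range()
    {
        const long double lo = std::max<long double>(std::numeric_limits<SrcT>::lowest(),
                                                     std::numeric_limits<DstT>::lowest());
        const long double hi = std::min<long double>(std::numeric_limits<SrcT>::max(),
                                                     std::numeric_limits<DstT>::max());
        return { static_cast<SrcT>(lo), static_cast<SrcT>(hi) };
    }
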
diff --git a/tests/validation/fixtures/ChannelCombineFixture.h b/tests/validation/fixtures/ChannelCombineFixture.h
deleted file mode 100644
index 68d023715c..0000000000
--- a/tests/validation/fixtures/ChannelCombineFixture.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE
-#define ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/ChannelCombine.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-template <typename TensorType>
-inline std::vector<TensorType> create_tensor_planes(const TensorShape &shape, Format format)
-{
- TensorShape image_shape = adjust_odd_shape(shape, format);
- TensorInfo info(image_shape, Format::U8);
-
- std::vector<TensorType> tensor_planes;
-
- switch(format)
- {
- case Format::RGB888:
- case Format::RGBA8888:
- case Format::YUV444:
- {
- tensor_planes.resize(3);
-
- if(format == Format::RGBA8888)
- {
- tensor_planes.resize(4);
- }
-
- for(unsigned int plane_idx = 0; plane_idx < tensor_planes.size(); ++plane_idx)
- {
- tensor_planes[plane_idx].allocator()->init(info);
- }
-
- break;
- }
- case Format::YUYV422:
- case Format::UYVY422:
- {
- const TensorShape uv_shape = calculate_subsampled_shape(image_shape, format);
- const TensorInfo info_hor2(uv_shape, Format::U8);
-
- tensor_planes.resize(3);
-
- tensor_planes[0].allocator()->init(info);
- tensor_planes[1].allocator()->init(info_hor2);
- tensor_planes[2].allocator()->init(info_hor2);
- break;
- }
- case Format::NV12:
- case Format::NV21:
- case Format::IYUV:
- {
- const TensorShape sub2_shape = calculate_subsampled_shape(image_shape, format);
- const TensorInfo info_sub2(sub2_shape, Format::U8);
-
- tensor_planes.resize(3);
-
- tensor_planes[0].allocator()->init(info);
- tensor_planes[1].allocator()->init(info_sub2);
- tensor_planes[2].allocator()->init(info_sub2);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- break;
- }
-
- return tensor_planes;
-}
-} // namespace
-
-template <typename MultiImageType, typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ChannelCombineValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, Format format)
- {
- _num_planes = num_planes_from_format(format);
- _target = compute_target(shape, format);
- _reference = compute_reference(shape, format);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- template <typename U>
- std::vector<SimpleTensor<U>> create_tensor_planes_reference(const TensorShape &shape, Format format)
- {
- std::vector<SimpleTensor<U>> tensor_planes;
-
- TensorShape image_shape = adjust_odd_shape(shape, format);
-
- switch(format)
- {
- case Format::RGB888:
- case Format::RGBA8888:
- case Format::YUV444:
- {
- if(format == Format::RGBA8888)
- {
- tensor_planes.emplace_back(image_shape, Format::U8);
- }
-
- tensor_planes.emplace_back(image_shape, Format::U8);
- tensor_planes.emplace_back(image_shape, Format::U8);
- tensor_planes.emplace_back(image_shape, Format::U8);
- break;
- }
- case Format::YUYV422:
- case Format::UYVY422:
- {
- const TensorShape hor2_shape = calculate_subsampled_shape(image_shape, format);
-
- tensor_planes.emplace_back(image_shape, Format::U8);
- tensor_planes.emplace_back(hor2_shape, Format::U8);
- tensor_planes.emplace_back(hor2_shape, Format::U8);
- break;
- }
- case Format::NV12:
- case Format::NV21:
- case Format::IYUV:
- {
- const TensorShape shape_sub2 = calculate_subsampled_shape(image_shape, format);
-
- tensor_planes.emplace_back(image_shape, Format::U8);
- tensor_planes.emplace_back(shape_sub2, Format::U8);
- tensor_planes.emplace_back(shape_sub2, Format::U8);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- break;
- }
-
- return tensor_planes;
- }
-
- MultiImageType compute_target(const TensorShape &shape, Format format)
- {
- // Create tensors
- std::vector<TensorType> ref_src = create_tensor_planes<TensorType>(shape, format);
- MultiImageType dst = create_multi_image<MultiImageType>(shape, format);
-
- // Create and configure function
- FunctionType channel_combine;
-
- if(1 == _num_planes)
- {
- const TensorType *tensor_extra = ((Format::RGBA8888 == format) ? &ref_src[3] : nullptr);
- TensorType *tensor_dst = dynamic_cast<TensorType *>(dst.plane(0));
-
- channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, tensor_dst);
- }
- else
- {
- channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
- }
-
- for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
- {
- const TensorType *dst_plane = static_cast<const TensorType *>(dst.plane(plane_idx));
-
- ARM_COMPUTE_EXPECT(dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
- {
- ARM_COMPUTE_EXPECT(ref_src[plane_idx].info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Allocate tensors
- dst.allocate();
-
- for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
- {
- ref_src[plane_idx].allocator()->allocate();
- }
-
- for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
- {
- const TensorType *dst_plane = static_cast<const TensorType *>(dst.plane(plane_idx));
-
- ARM_COMPUTE_EXPECT(!dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
- {
- ARM_COMPUTE_EXPECT(!ref_src[plane_idx].info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Fill tensor planes
- for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
- {
- fill(AccessorType(ref_src[plane_idx]), plane_idx);
- }
-
- // Compute function
- channel_combine.run();
-
- return dst;
- }
-
- std::vector<SimpleTensor<T>> compute_reference(const TensorShape &shape, Format format)
- {
- // Create reference
- std::vector<SimpleTensor<T>> ref_src = create_tensor_planes_reference<T>(shape, format);
-
- // Fill references
- for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
- {
- fill(ref_src[plane_idx], plane_idx);
- }
-
- return reference::channel_combine<T>(shape, ref_src, format);
- }
-
- unsigned int _num_planes{};
- MultiImageType _target{};
- std::vector<SimpleTensor<T>> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE */
diff --git a/tests/validation/fixtures/ChannelExtractFixture.h b/tests/validation/fixtures/ChannelExtractFixture.h
deleted file mode 100644
index c3c2e17d09..0000000000
--- a/tests/validation/fixtures/ChannelExtractFixture.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_CHANNEL_EXTRACT_FIXTURE
-#define ARM_COMPUTE_TEST_CHANNEL_EXTRACT_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/ChannelExtract.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename MultiImageType, typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ChannelExtractValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, Format format, Channel channel)
- {
- shape = adjust_odd_shape(shape, format);
-
- _target = compute_target(shape, format, channel);
- _reference = compute_reference(shape, format, channel);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- std::vector<SimpleTensor<T>> create_tensor_planes_reference(const TensorShape &shape, Format format)
- {
- TensorShape input = adjust_odd_shape(shape, format);
-
- std::vector<SimpleTensor<T>> tensor_planes;
-
- switch(format)
- {
- case Format::RGB888:
- case Format::RGBA8888:
- case Format::YUYV422:
- case Format::UYVY422:
- {
- tensor_planes.emplace_back(input, format);
- break;
- }
- case Format::NV12:
- case Format::NV21:
- {
- const TensorShape shape_uv88 = calculate_subsampled_shape(shape, Format::UV88);
-
- tensor_planes.emplace_back(input, Format::U8);
- tensor_planes.emplace_back(shape_uv88, Format::UV88);
- break;
- }
- case Format::IYUV:
- {
- const TensorShape shape_sub2 = calculate_subsampled_shape(shape, Format::IYUV);
-
- tensor_planes.emplace_back(input, Format::U8);
- tensor_planes.emplace_back(shape_sub2, Format::U8);
- tensor_planes.emplace_back(shape_sub2, Format::U8);
- break;
- }
- case Format::YUV444:
- tensor_planes.emplace_back(input, Format::U8);
- tensor_planes.emplace_back(input, Format::U8);
- tensor_planes.emplace_back(input, Format::U8);
- break;
- default:
- ARM_COMPUTE_ERROR("Not supported");
- break;
- }
-
- return tensor_planes;
- }
-
- TensorType compute_target(const TensorShape &shape, Format format, Channel channel)
- {
- const unsigned int num_planes = num_planes_from_format(format);
-
- TensorShape dst_shape = calculate_subsampled_shape(shape, format, channel);
-
- // Create tensors
- MultiImageType ref_src = create_multi_image<MultiImageType>(shape, format);
- TensorType dst = create_tensor<TensorType>(dst_shape, Format::U8);
-
- // Create and configure function
- FunctionType channel_extract;
-
- if(1U == num_planes)
- {
- const TensorType *plane_src = static_cast<TensorType *>(ref_src.plane(0));
-
- channel_extract.configure(plane_src, channel, &dst);
- }
- else
- {
- channel_extract.configure(&ref_src, channel, &dst);
- }
-
- for(unsigned int plane_idx = 0; plane_idx < num_planes; ++plane_idx)
- {
- const TensorType *src_plane = static_cast<const TensorType *>(ref_src.plane(plane_idx));
-
- ARM_COMPUTE_EXPECT(src_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- ref_src.allocate();
- dst.allocator()->allocate();
-
- for(unsigned int plane_idx = 0; plane_idx < num_planes; ++plane_idx)
- {
- const TensorType *src_plane = static_cast<const TensorType *>(ref_src.plane(plane_idx));
-
- ARM_COMPUTE_EXPECT(!src_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensor planes
- for(unsigned int plane_idx = 0; plane_idx < num_planes; ++plane_idx)
- {
- TensorType *src_plane = static_cast<TensorType *>(ref_src.plane(plane_idx));
-
- fill(AccessorType(*src_plane), plane_idx);
- }
-
- // Compute function
- channel_extract.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, Format format, Channel channel)
- {
- const unsigned int num_planes = num_planes_from_format(format);
-
- // Create reference
- std::vector<SimpleTensor<T>> ref_src = create_tensor_planes_reference(shape, format);
-
- // Fill references
- for(unsigned int plane_idx = 0; plane_idx < num_planes; ++plane_idx)
- {
- fill(ref_src[plane_idx], plane_idx);
- }
-
- return reference::channel_extract<T>(shape, ref_src, format, channel);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CHANNEL_EXTRACT_FIXTURE */
diff --git a/tests/validation/fixtures/ChannelShuffleLayerFixture.h b/tests/validation/fixtures/ChannelShuffleLayerFixture.h
index c9aae2dc17..530dba3893 100644
--- a/tests/validation/fixtures/ChannelShuffleLayerFixture.h
+++ b/tests/validation/fixtures/ChannelShuffleLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ChannelShuffleLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, unsigned int num_groups, DataType data_type, DataLayout data_layout)
{
_target = compute_target(shape, data_type, num_groups, data_layout);
@@ -75,15 +74,15 @@ protected:
FunctionType channel_shuffle_func;
channel_shuffle_func.configure(&src, &dst, num_groups);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
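
Two mechanical changes recur throughout this patch and appear here for the first time: setup() loses its template <typename...> header (the parameter pack was never used by any fixture body), and the paired ARM_COMPUTE_EXPECT(cond, framework::LogLevel::ERRORS) checks around allocation become ARM_COMPUTE_ASSERT(cond), dropping the explicit severity argument. A condensed sketch of the allocate-and-check pattern the updated fixtures share (tensor names illustrative):

    TensorType src = create_tensor<TensorType>(shape, data_type);
    TensorType dst = create_tensor<TensorType>(shape, data_type);

    FunctionType func;
    func.configure(&src, &dst /*, ...*/);

    // Configuration alone must not have frozen the tensor shapes.
    ARM_COMPUTE_ASSERT(src.info()->is_resizable());
    ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Allocation commits the shapes.
    ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
    ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
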
diff --git a/tests/validation/fixtures/Col2ImFixture.h b/tests/validation/fixtures/Col2ImFixture.h
index 5488f8a3ea..4d56d607b7 100644
--- a/tests/validation/fixtures/Col2ImFixture.h
+++ b/tests/validation/fixtures/Col2ImFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,10 +45,9 @@ namespace validation
using namespace arm_compute::misc::shape_calculator;
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool batch_size_on_z>
-class Col2ImValidationFixture : public framework::Fixture
+class Col2ImOpValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, const unsigned int convolved_width, unsigned int convolved_height, unsigned int num_groups, DataType data_type)
{
const Size2D convolved_dims(convolved_width, convolved_height);
@@ -74,23 +73,28 @@ protected:
// Create and configure function
FunctionType col2im_func;
- col2im_func.configure(&src, &dst, convolved_dims, num_groups);
+ col2im_func.configure(src.info(), dst.info(), convolved_dims, num_groups);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
+ arm_compute::ITensorPack pack =
+ {
+ { arm_compute::TensorType::ACL_SRC, &src },
+ { arm_compute::TensorType::ACL_DST, &dst }
+ };
// Compute function
- col2im_func.run();
+ col2im_func.run(pack);
return dst;
}
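
The renamed Col2ImOpValidationFixture switches from the function-style API (configure on tensors, then run()) to the operator-style one: configure() now receives only the ITensorInfo metadata, and the actual tensors are bound at execution time through an ITensorPack. A minimal sketch of that calling convention, reusing the names from the hunk above:

    FunctionType op;
    op.configure(src.info(), dst.info(), convolved_dims, num_groups); // metadata only

    arm_compute::ITensorPack pack =
    {
        { arm_compute::TensorType::ACL_SRC, &src }, // bind the real tensors late
        { arm_compute::TensorType::ACL_DST, &dst }
    };
    op.run(pack); // the same configured operator can be re-run with different tensors
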
diff --git a/tests/validation/fixtures/ColorConvertFixture.h b/tests/validation/fixtures/ColorConvertFixture.h
deleted file mode 100644
index cbaff6dfce..0000000000
--- a/tests/validation/fixtures/ColorConvertFixture.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_COLOR_CONVERT_FIXTURE
-#define ARM_COMPUTE_TEST_COLOR_CONVERT_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/ColorConvert.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-}
-template <typename MultiImageType, typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ColorConvertValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, Format src_format, Format dst_format)
- {
- shape = adjust_odd_shape(shape, src_format);
- shape = adjust_odd_shape(shape, dst_format);
-
- _target = compute_target(shape, src_format, dst_format);
- _reference = compute_reference(shape, src_format, dst_format);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- std::vector<SimpleTensor<T>> create_tensor_planes_reference(const TensorShape &shape, Format format)
- {
- std::vector<SimpleTensor<T>> tensor_planes;
-
- switch(format)
- {
- case Format::RGB888:
- case Format::RGBA8888:
- case Format::YUYV422:
- case Format::UYVY422:
- {
- tensor_planes.emplace_back(shape, format);
- break;
- }
- case Format::NV12:
- case Format::NV21:
- {
- const TensorShape shape_uv88 = calculate_subsampled_shape(shape, Format::UV88);
-
- tensor_planes.emplace_back(shape, Format::U8);
- tensor_planes.emplace_back(shape_uv88, Format::UV88);
- break;
- }
- case Format::IYUV:
- {
- const TensorShape shape_sub2 = calculate_subsampled_shape(shape, Format::IYUV);
-
- tensor_planes.emplace_back(shape, Format::U8);
- tensor_planes.emplace_back(shape_sub2, Format::U8);
- tensor_planes.emplace_back(shape_sub2, Format::U8);
- break;
- }
- case Format::YUV444:
- {
- tensor_planes.emplace_back(shape, Format::U8);
- tensor_planes.emplace_back(shape, Format::U8);
- tensor_planes.emplace_back(shape, Format::U8);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- break;
- }
-
- return tensor_planes;
- }
-
- MultiImageType compute_target(const TensorShape &shape, Format src_format, Format dst_format)
- {
- _src_num_planes = num_planes_from_format(src_format);
- _dst_num_planes = num_planes_from_format(dst_format);
-
- // Create tensors
- MultiImageType ref_src = create_multi_image<MultiImageType>(shape, src_format);
- MultiImageType ref_dst = create_multi_image<MultiImageType>(shape, dst_format);
-
- // Create and configure function
- FunctionType color_convert;
-
- if(1U == _src_num_planes)
- {
- const TensorType *plane_src = static_cast<TensorType *>(ref_src.plane(0));
-
- if(1U == _dst_num_planes)
- {
- TensorType *plane_dst = static_cast<TensorType *>(ref_dst.plane(0));
- color_convert.configure(plane_src, plane_dst);
- }
- else
- {
- color_convert.configure(plane_src, &ref_dst);
- }
- }
- else
- {
- if(1U == _dst_num_planes)
- {
- TensorType *plane_dst = static_cast<TensorType *>(ref_dst.plane(0));
- color_convert.configure(&ref_src, plane_dst);
- }
- else
- {
- color_convert.configure(&ref_src, &ref_dst);
- }
- }
-
- for(unsigned int plane_idx = 0; plane_idx < _src_num_planes; ++plane_idx)
- {
- const TensorType *src_plane = static_cast<const TensorType *>(ref_src.plane(plane_idx));
-
- ARM_COMPUTE_EXPECT(src_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
- for(unsigned int plane_idx = 0; plane_idx < _dst_num_planes; ++plane_idx)
- {
- const TensorType *dst_plane = static_cast<const TensorType *>(ref_dst.plane(plane_idx));
-
- ARM_COMPUTE_EXPECT(dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Allocate tensors
- ref_src.allocate();
- ref_dst.allocate();
-
- for(unsigned int plane_idx = 0; plane_idx < _src_num_planes; ++plane_idx)
- {
- const TensorType *src_plane = static_cast<const TensorType *>(ref_src.plane(plane_idx));
- ARM_COMPUTE_EXPECT(!src_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- for(unsigned int plane_idx = 0; plane_idx < _dst_num_planes; ++plane_idx)
- {
- const TensorType *dst_plane = static_cast<const TensorType *>(ref_dst.plane(plane_idx));
- ARM_COMPUTE_EXPECT(!dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Fill tensor planes
- for(unsigned int plane_idx = 0; plane_idx < _src_num_planes; ++plane_idx)
- {
- TensorType *src_plane = static_cast<TensorType *>(ref_src.plane(plane_idx));
-
- fill(AccessorType(*src_plane), plane_idx);
- }
-
- // Compute function
- color_convert.run();
-
- return ref_dst;
- }
-
- std::vector<SimpleTensor<T>> compute_reference(const TensorShape &shape, Format src_format, Format dst_format)
- {
- // Create reference
- std::vector<SimpleTensor<T>> ref_src = create_tensor_planes_reference(shape, src_format);
-
- // Fill references
- for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
- {
- fill(ref_src[plane_idx], plane_idx);
- }
-
- return reference::color_convert<T>(shape, ref_src, src_format, dst_format);
- }
-
- unsigned int _src_num_planes{};
- unsigned int _dst_num_planes{};
- MultiImageType _target{};
- std::vector<SimpleTensor<T>> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_COLOR_CONVERT_FIXTURE */
diff --git a/tests/validation/fixtures/ComparisonFixture.h b/tests/validation/fixtures/ComparisonFixture.h
index d1e1a539c7..f25d5abb73 100644
--- a/tests/validation/fixtures/ComparisonFixture.h
+++ b/tests/validation/fixtures/ComparisonFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComparisonValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1)
{
_target = compute_target(op, shape0, shape1, data_type, qinfo0, qinfo1);
@@ -71,18 +70,18 @@ protected:
FunctionType comp_op;
comp_op.configure(&ref_src1, &ref_src2, &dst, op);
- ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -117,7 +116,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComparisonBroadcastValidationFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type)
{
ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1, data_type, QuantizationInfo(), QuantizationInfo());
@@ -128,7 +126,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComparisonValidationFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(ComparisonOperation op, const TensorShape &shape, DataType data_type)
{
ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape, shape, data_type, QuantizationInfo(), QuantizationInfo());
@@ -139,7 +136,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComparisonValidationQuantizedFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(ComparisonOperation op, const TensorShape &shape, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1)
{
@@ -151,7 +147,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComparisonQuantizedBroadcastValidationFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1)
{
ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1, data_type, qinfo0, qinfo1);
diff --git a/tests/validation/fixtures/ComputeAllAnchorsFixture.h b/tests/validation/fixtures/ComputeAllAnchorsFixture.h
index e837bd4838..620f1b53fa 100644
--- a/tests/validation/fixtures/ComputeAllAnchorsFixture.h
+++ b/tests/validation/fixtures/ComputeAllAnchorsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComputeAllAnchorsGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo)
{
_target = compute_target(num_anchors, data_type, info, qinfo);
@@ -69,13 +68,13 @@ protected:
FunctionType compute_all_anchors;
compute_all_anchors.configure(&anchors, &all_anchors, info);
- ARM_COMPUTE_EXPECT(all_anchors.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(all_anchors.info()->is_resizable());
// Allocate tensors
all_anchors.allocator()->allocate();
anchors.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!all_anchors.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!all_anchors.info()->is_resizable());
// Fill tensors
fill(AccessorType(anchors));
@@ -107,7 +106,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComputeAllAnchorsFixture : public ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type)
{
ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(num_anchors, info, data_type, QuantizationInfo());
@@ -118,7 +116,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ComputeAllAnchorsQuantizedFixture : public ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo)
{
ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(num_anchors, info, data_type, qinfo);
diff --git a/tests/validation/fixtures/ConcatenateLayerFixture.h b/tests/validation/fixtures/ConcatenateLayerFixture.h
index d1eed63d41..3a021661ac 100644
--- a/tests/validation/fixtures/ConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/ConcatenateLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,11 +43,13 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T, bool CI = true>
class ConcatenateLayerValidationFixture : public framework::Fixture
{
+private:
+ using SrcITensorType = typename std::conditional<CI, const ITensorType, ITensorType>::type;
+
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, unsigned int axis)
{
// Create input shapes
@@ -67,8 +69,8 @@ public:
{
qi = QuantizationInfo(1.f / 255.f, offset_dis(gen));
}
- std::bernoulli_distribution mutate_dis(0.5f);
- std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
+ std::bernoulli_distribution mutate_dis(0.5f);
+ std::uniform_real_distribution<float> change_dis(-0.25f, 0.f);
// Generate more shapes based on the input
for(auto &s : shapes)
@@ -95,8 +97,8 @@ protected:
TensorType compute_target(const std::vector<TensorShape> &shapes, const std::vector<QuantizationInfo> &qinfo, DataType data_type, unsigned int axis)
{
- std::vector<TensorType> srcs;
- std::vector<ITensorType *> src_ptrs;
+ std::vector<TensorType> srcs;
+ std::vector<SrcITensorType *> src_ptrs;
// Create tensors
srcs.reserve(shapes.size());
@@ -116,20 +118,20 @@ protected:
for(auto &src : srcs)
{
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
}
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
for(auto &src : srcs)
{
src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
}
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
int i = 0;
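
The new CI template flag decides at compile time whether the source pointers passed to configure() are const-qualified; std::conditional maps the flag to either const ITensorType or ITensorType. The trick in isolation (a standalone sketch, names hypothetical):

    #include <type_traits>
    #include <vector>

    template <typename ITensorType, bool CI>
    struct Sources
    {
        // const ITensorType when CI is true, mutable ITensorType otherwise.
        using SrcITensorType = typename std::conditional<CI, const ITensorType, ITensorType>::type;
        std::vector<SrcITensorType *> ptrs; // mirrors the fixture's src_ptrs member
    };
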
diff --git a/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h b/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h
index 0fcef5c31f..7ad14e1b40 100644
--- a/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h
+++ b/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ConvertFullyConnectedWeightsValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int weights_w, DataLayout training_data_layout, DataType data_type)
{
const unsigned int height = input_shape.x() * input_shape.y() * input_shape.z();
@@ -61,14 +60,19 @@ protected:
{
case DataType::QASYMM8:
{
- std::uniform_int_distribution<uint8_t> distribution(0, 10);
+ std::uniform_int_distribution<uint32_t> distribution(0, 10);
library->fill(tensor, distribution, i);
break;
}
- case DataType::F32:
case DataType::F16:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -88,15 +92,15 @@ protected:
convert_weights.configure(&src, &dst, input_shape, training_data_layout);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
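
Note the widened distribution types: the QASYMM8 case now draws from std::uniform_int_distribution<uint32_t> instead of <uint8_t>. The C++ standard only defines uniform_int_distribution for short, int, long, long long and their unsigned counterparts; instantiating it with a character-sized type such as uint8_t is undefined behavior, and some standard libraries reject it at compile time. A small sketch of the safe idiom:

    #include <cstdint>
    #include <random>

    std::mt19937 gen(0);
    // std::uniform_int_distribution<uint8_t> bad(0, 10); // UB: char-sized IntType
    std::uniform_int_distribution<uint32_t> ok(0, 10);    // draw wide ...
    const uint8_t value = static_cast<uint8_t>(ok(gen));  // ... then narrow
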
diff --git a/tests/validation/fixtures/ConvolutionFixture.h b/tests/validation/fixtures/ConvolutionFixture.h
deleted file mode 100644
index 04172a629e..0000000000
--- a/tests/validation/fixtures/ConvolutionFixture.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2017-2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_CONVOLUTION_FIXTURE
-#define ARM_COMPUTE_TEST_CONVOLUTION_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Convolution.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationFixture : public framework::Fixture
-{
-protected:
- template <typename...>
- void setup(TensorShape shape, DataType output_data_type, BorderMode border_mode, const unsigned int width, const unsigned int height, const bool is_separable = false)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- std::uniform_int_distribution<uint8_t> scale_distribution(1, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- // Generate random scale value between 1 and 255.
- const uint32_t scale = scale_distribution(gen);
-
- ARM_COMPUTE_ERROR_ON(3 != width && 5 != width && 7 != width && 9 != width);
- ARM_COMPUTE_ERROR_ON(3 != height && 5 != height && 7 != height && 9 != height);
-
- std::vector<int16_t> conv(width * height);
-
- _width = width;
- _height = height;
-
- if(is_separable)
- {
- init_separable_conv(conv.data(), width, height, library->seed());
- }
- else
- {
- init_conv(conv.data(), width, height, library->seed());
- }
-
- _target = compute_target(shape, output_data_type, conv.data(), scale, border_mode, constant_border_value);
- _reference = compute_reference(shape, output_data_type, conv.data(), scale, border_mode, constant_border_value);
- }
-
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType output_data_type, const int16_t *conv, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create reference
- SimpleTensor<uint8_t> src{ shape, DataType::U8 };
-
- // Fill reference
- fill(src, 0);
-
- // Compute reference
- return reference::convolution<T>(src, output_data_type, conv, scale, border_mode, constant_border_value, _width, _height);
- }
-
- virtual TensorType compute_target(const TensorShape &shape, DataType output_data_type, const int16_t *conv, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value) = 0;
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
- unsigned int _width{};
- unsigned int _height{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionSquareValidationFixture : public ConvolutionValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType output_data_type, BorderMode border_mode, const unsigned int width)
- {
- ConvolutionValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, output_data_type, border_mode, width, width);
- }
-
-protected:
- TensorType compute_target(const TensorShape &shape, DataType output_data_type, const int16_t *conv, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, DataType::U8);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type);
-
- // Create and configure function
- FunctionType convolution;
- convolution.configure(&src, &dst, conv, scale, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- this->fill(AccessorType(src), 0);
- this->fill(AccessorType(dst), 1);
-
- // Compute function
- convolution.run();
-
- return dst;
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionSeparableValidationFixture : public ConvolutionValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType output_data_type, BorderMode border_mode, const unsigned int width)
- {
- ConvolutionValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, output_data_type, border_mode, width, width, true);
- }
-
-protected:
- TensorType compute_target(const TensorShape &shape, DataType output_data_type, const int16_t *conv, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, DataType::U8);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type);
-
- // Create and configure function
- FunctionType convolution;
- convolution.configure(&src, &dst, conv, scale, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- this->fill(AccessorType(src), 0);
- this->fill(AccessorType(dst), 1);
-
- // Compute function
- convolution.run();
-
- return dst;
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionRectangleValidationFixture : public ConvolutionValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType output_data_type, BorderMode border_mode, const unsigned int width, const unsigned int height)
- {
- ConvolutionValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, output_data_type, border_mode, width, height);
- }
-
-protected:
- TensorType compute_target(const TensorShape &shape, DataType output_data_type, const int16_t *conv, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, DataType::U8);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type);
-
- // Create and configure function
- FunctionType convolution;
- convolution.configure(&src, &dst, conv, this->_width, this->_height, scale, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- this->fill(AccessorType(src), 0);
- this->fill(AccessorType(dst), 1);
-
- // Compute function
- convolution.run();
-
- return dst;
- }
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CONVOLUTION_FIXTURE */
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index b4abebe18d..0622e5e6f0 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,12 +21,19 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/graph/Utils.h"
+#ifdef ARM_COMPUTE_OPENCL_ENABLED
+#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
+#endif // ARM_COMPUTE_OPENCL_ENABLED
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "src/graph/mutators/MutatorUtils.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -35,19 +42,48 @@
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/PadLayer.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"
#include <random>
+#include <type_traits>
namespace arm_compute
{
-class NEConvolutionLayer;
-
namespace test
{
namespace validation
{
+namespace detail
+{
+template <typename ConvolutionFunction, typename TensorType>
+#ifdef ARM_COMPUTE_OPENCL_ENABLED
+std::enable_if_t<!std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
+#else // ARM_COMPUTE_OPENCL_ENABLED
+void
+#endif // ARM_COMPUTE_OPENCL_ENABLED
+configure_conv_function(ConvolutionFunction &func,
+ TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
+ const PadStrideInfo &info, const WeightsInfo &weights_info,
+ const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
+{
+ func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, false /* enable_fast_math */, num_groups);
+}
+
+#ifdef ARM_COMPUTE_OPENCL_ENABLED
+template <typename ConvolutionFunction, typename TensorType>
+std::enable_if_t<std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
+configure_conv_function(ConvolutionFunction &func,
+ TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
+ const PadStrideInfo &info, const WeightsInfo &weights_info,
+ const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
+{
+ func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, num_groups);
+}
+#endif // ARM_COMPUTE_OPENCL_ENABLED
+} // namespace detail
+
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
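
detail::configure_conv_function dispatches on the function type with std::enable_if_t: CLGEMMConvolutionLayer::configure takes no enable_fast_math argument, so it gets a dedicated overload, while every other convolution function goes through the generic one that passes false for fast math. The mechanism in isolation (types hypothetical):

    #include <type_traits>

    struct SpecialConv { void configure(int /*groups*/) {} };                     // no fast-math parameter
    struct GenericConv { void configure(bool /*fast_math*/, int /*groups*/) {} }; // has one

    template <typename F>
    std::enable_if_t<std::is_same<F, SpecialConv>::value, void>
    configure_any(F &f) { f.configure(/*groups*/ 1); }

    template <typename F>
    std::enable_if_t<!std::is_same<F, SpecialConv>::value, void>
    configure_any(F &f) { f.configure(/*fast_math*/ false, /*groups*/ 1); }

    int main()
    {
        SpecialConv s; GenericConv g;
        configure_any(s); // resolves to the SpecialConv overload
        configure_any(g); // resolves to the generic overload
    }
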
@@ -56,26 +92,84 @@ public:
|| std::is_same<typename std::decay<T>::type, int8_t>::value,
int32_t, T >::type;
+ void setup_quantization(TensorShape input_shape, TensorShape weights_shape, QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ _quantization_info = QuantizationInfo(scale_lhs, offset_lhs);
+ _weight_quantization_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
+ weights_shape.y() /* heights */, weights_shape.x() /* width */, input_shape.z() /* channels */,
+ data_type, 0.5f /* bias_fraction */);
+
+ _dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+ }
+
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
- DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
+ bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false)
{
+ // This hash is used by the random generators. There may be hash collisions, but
+ // this is intentional: it is a very easy way to make the random generation
+ // process differ across many test configurations that previously used the same
+ // set of values.
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
+ weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] +
+ mixed_layout + (data_type == DataType::QASYMM8_SIGNED) + (data_layout == DataLayout::NHWC);
+
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_weights_data_type = weights_data_type;
- _is_quantized = is_data_type_quantized_asymmetric(data_type);
+ const bool is_quantized = is_data_type_quantized(weights_data_type);
_is_bfloat16 = data_type == DataType::BFLOAT16;
- _bias_data_type = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
+ _bias_data_type = is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
_output_data_type = _is_bfloat16 ? DataType::F32 : data_type;
_quantization_info = quantization_info;
_weight_quantization_info = weight_quantization_info;
_data_layout = data_layout;
+ _dst_q_info = quantization_info;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
+ if(is_quantized && !is_data_type_quantized_symmetric(weights_data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(input_shape, weights_shape, _quantization_info, _weight_quantization_info, data_type);
+ _use_dynamic_output_quant = true;
+ }
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer, padded_weights);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ // Test multi-data-layout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can check the values correctly
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
void regularize_values(void *values, size_t size)
{
float *fvalues = static_cast<float *>(values);
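
setup_quantization() above draws the input and weight scales log-uniformly: a uniform exponent in [-5, 3] fed through pow(2, .) yields a scale anywhere in [2^-5, 2^3], with small and large scales equally likely. The destination quantization and a valid bias range are then derived from those scales via suggest_conv_dst_q_info_and_bias. The sampling step in isolation (seed value illustrative):

    #include <cmath>
    #include <random>

    std::mt19937 generator(1234); // the fixture seeds with library->seed() + _hash
    std::uniform_real_distribution<float> exponent(-5.0f, 3.0f);
    // Log-uniform scale in [2^-5, 2^3].
    const float scale = std::pow(2.0f, exponent(generator));
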
@@ -92,16 +186,34 @@ protected:
{
case DataType::QASYMM8:
{
- std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
- library->fill(tensor, distribution, i);
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(0, 255);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
break;
}
case DataType::QASYMM8_SIGNED:
{
- std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
- library->fill(tensor, distribution, i);
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(-128, 127);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
break;
}
case DataType::QSYMM8_PER_CHANNEL:
@@ -120,21 +232,31 @@ protected:
max_bound = bounds.second;
}
}
- std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
+ std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
library->fill(tensor, distribution, i);
break;
}
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
library->fill(tensor, distribution, i);
break;
}
case DataType::BFLOAT16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -143,8 +265,9 @@ protected:
}
}
+ // The given input is in NCHW format
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
+ bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false)
{
ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
@@ -155,6 +278,18 @@ protected:
permute(input_shape, PermutationVector(2U, 0U, 1U));
permute(weights_shape, PermutationVector(2U, 0U, 1U));
permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ if(pre_pad_layer.size() > 0)
+ {
+ // Make sure a padding pair exists for each of the C, H, W dimensions
+ for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i)
+ {
+ pre_pad_layer.push_back({ 0, 0 });
+ }
+
+ // Rotate the padding info from NCHW to NHWC ordering
+ std::rotate(pre_pad_layer.begin(), pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3);
+ }
}
const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
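
For NHWC runs, the per-dimension paddings supplied in the NCHW storage order (W, H, C) must be re-ordered: std::rotate(begin, begin + 2, begin + 3) moves the channel entry to the front, turning (W, H, C) into (C, W, H), the NHWC storage order. The same call on a plain vector, with hypothetical padding values:

    #include <algorithm>
    #include <utility>
    #include <vector>

    int main()
    {
        // One (before, after) padding pair per dimension, NCHW storage order: W, H, C.
        std::vector<std::pair<int, int>> pad = { {1, 1}, {2, 2}, {0, 0} };
        std::rotate(pad.begin(), pad.begin() + 2, pad.begin() + 3);
        // pad == { {0, 0}, {1, 1}, {2, 2} }: C, W, H, the NHWC storage order.
    }
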
@@ -166,17 +301,47 @@ protected:
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, QuantizationInfo() /*bias is not a quantized type*/, _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, _dst_q_info, _data_layout);
// Create and configure function
FunctionType conv;
- conv.configure(&src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT);
+ const unsigned int width_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH);
+
+ const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0);
+ const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0);
+
+ if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer))
+ {
+ // This mirrors the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution
+ const PadStrideInfo new_conv_info(
+ info.stride().first,
+ info.stride().second,
+ info.pad_left() + pad_w.first,
+ info.pad_right() + pad_w.second,
+ info.pad_top() + pad_h.first,
+ info.pad_bottom() + pad_h.second,
+ info.round());
+ detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups);
+ }
+ else
+ {
+ detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
+ }
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ // Test the "add padding after configure" behavior; it should not affect correctness
+ add_padding_x({ &src, &bias, &dst }, _data_layout);
+ // Padding the weights may change the code path taken in some backends
+ if (padded_weights)
+ {
+ add_padding_x({ &weights }, _data_layout);
+ }
// Allocate tensors
src.allocator()->allocate();
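
When the pre-pad touches only height or width, the fixture folds it into the convolution exactly as the graph mutator would: the standalone pad disappears and its amounts are added onto the convolution's own padding. Numerically, with hypothetical amounts and the PadStrideInfo accessors used above:

    // A standalone pad of 1 column per side and 2 rows top/bottom, fused into a
    // stride-1 convolution that had no padding of its own:
    const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::FLOOR);
    const PaddingInfo   pad_w(1, 1), pad_h(2, 2);

    const PadStrideInfo fused(conv_info.stride().first, conv_info.stride().second,
                              conv_info.pad_left() + pad_w.first, conv_info.pad_right() + pad_w.second,
                              conv_info.pad_top() + pad_h.first, conv_info.pad_bottom() + pad_h.second,
                              conv_info.round()); // pads become (1, 1, 2, 2); strides unchanged
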
@@ -184,24 +349,31 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(bias), 2);
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(bias), 2 + _hash);
- // Compute NEConvolutionLayer function
- conv.run();
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- const Size2D &dilation, const ActivationLayerInfo act_info)
+ const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
{
ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
@@ -217,9 +389,9 @@ protected:
SimpleTensor<TW> weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
// Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
if(_is_bfloat16)
@@ -228,9 +400,14 @@ protected:
regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
}
- return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
+ if(pre_pad_layer.size() > 0)
+ {
+ src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT);
+ }
+
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info),
act_info) :
- reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
+ reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info);
}
TensorType _target{};
@@ -242,34 +419,63 @@ protected:
DataLayout _data_layout{};
QuantizationInfo _quantization_info{};
QuantizationInfo _weight_quantization_info{};
- bool _is_quantized = false;
+ QuantizationInfo _dst_q_info{};
bool _is_bfloat16 = false;
+ bool _mixed_layout = false;
+ bool _use_dynamic_output_quant{false};
+ int32_t _hash{0};
+ int32_t _min_bias{-100};
+ int32_t _max_bias{100};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
DataLayout data_layout, ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
data_type, data_type, data_layout,
- QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class ConvolutionValidationPaddedWeightsFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
+ DataLayout data_layout)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
+ data_type, data_type, data_layout,
+ QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), mixed_layout, PaddingList({}), true);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class ConvolutionValidationWithPaddingFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
+ DataLayout data_layout, ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
+ data_type, data_type, data_layout,
+ QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout, pre_pad_layer);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
- data_type, data_type, data_layout, quantization_info, quantization_info, act_info);
+ data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
}
};
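
Each derived fixture also gains a bool mixed_layout template parameter (default false) that is simply forwarded to the generic fixture, so a layout-switching variant of any suite is one extra template argument away rather than a new class. Plausible instantiations (type names illustrative):

    // Default: run() executes with the configured layout.
    using ConvFixture      = ConvolutionValidationFixture<Tensor, Accessor, Function, float>;
    // mixed_layout = true: mix_layout() flips NCHW <-> NHWC around run().
    using ConvMixedFixture = ConvolutionValidationFixture<Tensor, Accessor, Function, float, true>;
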
@@ -277,13 +483,12 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
{
- std::vector<float> weights_scales{};
- std::mt19937 gen(library->seed());
- std::uniform_real_distribution<> dis(0.01f, 1);
+ std::vector<float> weights_scales{};
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> dis(0.01f, 1.f);
for(size_t i = 0; i < output_shape[2]; ++i)
{
weights_scales.push_back(dis(gen));
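
For QSYMM8_PER_CHANNEL weights the fixture generates one random scale per output channel (output_shape[2] of them) and wraps the vector in a QuantizationInfo, leaving the offsets at zero as symmetric quantization requires. The generation step as a standalone helper (a sketch; the fixture does this inline):

    #include <random>
    #include <vector>

    std::vector<float> make_per_channel_scales(size_t num_output_channels, unsigned seed)
    {
        std::vector<float> scales;
        std::mt19937 gen(seed); // the fixture uses library->seed()
        std::uniform_real_distribution<float> dis(0.01f, 1.f);
        for(size_t i = 0; i < num_output_channels; ++i)
        {
            scales.push_back(dis(gen));
        }
        return scales; // passed on as QuantizationInfo(scales)
    }
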
@@ -293,7 +498,311 @@ public:
quantization_info, QuantizationInfo(weights_scales), act_info);
}
};
+
+
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+inline TensorInfo prepare_weights(const TensorInfo tensor_info, const arm_compute::WeightFormat weight_format)
+{
+ const DataLayout data_layout = tensor_info.data_layout();
+ ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
+ const DataType data_type = tensor_info.data_type();
+ const TensorShape tensor_shape = tensor_info.tensor_shape();
+ const int N = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
+ const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
+ const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
+ const int C = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
+
+ const int interleave_by = arm_compute::interleave_by(weight_format);
+ const int block_by = arm_compute::block_by(weight_format);
+ const int Ip = arm_gemm::roundup<unsigned int>(C, block_by); // C'=I'
+ const int Op = arm_gemm::roundup<unsigned int>(N, interleave_by); // O'=N'
+
+ arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes();
+ strides_in_bytes.set(1, Ip * interleave_by * H * W * tensor_info.element_size());
+ strides_in_bytes.set(2, Ip * Op * tensor_info.element_size());
+
+ const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes();
+
+ // Total size needs to include padded dimensions
+ const size_t total_size_in_bytes = Op * H * W * Ip * tensor_info.element_size();
+
+ const TensorShape TS(Ip, W, H, Op);
+
+ TensorInfo new_tensor_info = tensor_info;
+ new_tensor_info.init(TS, 1 /*num_channels, deprecated*/, data_type, strides_in_bytes,
+ offset_first_element_in_bytes, total_size_in_bytes);
+ return new_tensor_info;
+}
+
+template <typename ScalarType, typename AccessorType>
+inline void rearrange_data(const AccessorType src, AccessorType dst, const arm_compute::WeightFormat weight_format)
+{
+ ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(weight_format), framework::LogLevel::ERRORS);
+ // Data Layout: OHWIo<interleave_by>i<block_by>
+ const int interleave_by = arm_compute::interleave_by(weight_format);
+ const int block_by = arm_compute::block_by(weight_format);
+ const TensorShape src_tensor_shape = src.shape();
+ const DataLayout data_layout = src.data_layout();
+ ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
+ const unsigned int O = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
+ const unsigned int H = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
+ const unsigned int W = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
+ const unsigned int I = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
+ const unsigned int Ip = arm_gemm::roundup<unsigned int>(I, block_by); // C'=I'
+ const unsigned int Op = arm_gemm::roundup<unsigned int>(O, interleave_by); // N'=O'
+
+ ARM_COMPUTE_EXPECT_EQUAL(Op * H * W * Ip, (unsigned)dst.num_elements(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS);
+
+ const ScalarType *src_ptr = reinterpret_cast<const ScalarType *>(src.data());
+ ScalarType *dst_ptr = reinterpret_cast<ScalarType *>(dst.data());
+ for(unsigned i = 0; i < I; ++i)
+ for(unsigned w = 0; w < W; ++w)
+ for(unsigned h = 0; h < H; ++h)
+ for(unsigned o = 0; o < O; ++o)
+ {
+ ScalarType src_element;
+ switch(data_layout)
+ {
+ case DataLayout::NHWC:
+ {
+ src_element = src_ptr[o * H * W * I + h * W * I + w * I + i];
+ }
+ break;
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported memory layout.");
+ }
+ }
+ const int x5 = std::floor(((float)o) / interleave_by);
+ const int x4 = h;
+ const int x3 = w;
+ const int x2 = std::floor((float)i / block_by);
+ const int x1 = o % interleave_by;
+ const int x0 = i % block_by;
+ unsigned dst_idx = x5 * H * W * Ip * interleave_by
+ + x4 * W * Ip * interleave_by
+ + x3 * Ip * interleave_by
+ + x2 * interleave_by * block_by
+ + x1 * block_by
+ + x0;
+ dst_ptr[dst_idx] = src_element;
+ }
+}
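+
+// Worked example of the mapping above, reusing the assumed shapes from the
+// prepare_weights() comment (interleave_by = 4, block_by = 2, H = W = 3,
+// I = 6, hence Ip = 6): the source element at (o = 5, h = 0, w = 0, i = 3)
+// gives x5 = 1, x4 = x3 = 0, x2 = 1, x1 = 1, x0 = 1, so
+// dst_idx = 1*3*3*6*4 + 1*4*2 + 1*2 + 1 = 227, i.e. the element lands in the
+// second interleaved group of output channels.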
+
+template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
+class VariableWeightsFixtureBaseClass : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataLayout data_layout,
+ const DataType data_type)
+ {
+ conv = std::make_unique<ConvolutionFunction>();
+ // prepare data
+ _data_layout = data_layout;
+ // Fixed format kernels for variable weights can work only with NHWC format.
+ ARM_COMPUTE_EXPECT_EQUAL(_data_layout, DataLayout::NHWC, framework::LogLevel::ERRORS);
+ _data_type = data_type;
+        // Run the target and the reference computations
+ compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
+ compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
+ }
+ void teardown()
+ {
+ _target.allocator()->free();
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+private:
+ virtual void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
+ const PadStrideInfo &conv_info,
+ const Size2D &dilation) = 0;
+
+ void compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &conv_info,
+ const Size2D &dilation)
+ {
+        // The dataset is always in NCHW format - we need to make C the
+        // innermost dimension because the fixed-format kernels work only
+        // with the NHWC layout.
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ const auto src_tensor_info = TensorInfo(input_shape, 1, _data_type, _data_layout);
+ const auto weight_tensor_info = TensorInfo(weights_shape, 1, _data_type, _data_layout);
+ const auto bias_tensor_info = TensorInfo(bias_shape, 1, _data_type, _data_layout);
+ auto dst_tensor_info = TensorInfo(output_shape, 1, _data_type, _data_layout);
+
+ const int kernel_height = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT)];
+ const int kernel_width = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)];
+ const int num_kernels = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES)];
+
+ const WeightsInfo query_weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, arm_compute::WeightFormat::ANY);
+ const bool kernel_found = bool(ConvolutionFunction::has_opt_impl(_computed_weight_format, &src_tensor_info, &weight_tensor_info,
+ &bias_tensor_info, &dst_tensor_info, conv_info, query_weights_info));
+        // Make sure that the setup finds a fixed-format kernel as requested by the test case.
+ ARM_COMPUTE_EXPECT(kernel_found, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(_computed_weight_format), framework::LogLevel::ERRORS);
+
+ const WeightsInfo weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, _computed_weight_format);
+ configure_and_execute_kernel(src_tensor_info, weight_tensor_info, bias_tensor_info, dst_tensor_info, weights_info, conv_info,
+ dilation);
+ }
+ void compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
+ const Size2D &dilation)
+ {
+ ARM_COMPUTE_UNUSED(input_shape, weights_shape, bias_shape, output_shape, info,
+ dilation);
+
+ // Create reference
+ SimpleTensor<ScalarType> src{ input_shape, _data_type };
+ SimpleTensor<ScalarType> weights{ weights_shape, _data_type };
+ SimpleTensor<ScalarType> bias{ bias_shape, _data_type };
+ fill(src, 0);
+ fill(bias, 1);
+ fill(weights, 3);
+ _reference = reference::convolution_layer<ScalarType>(src, weights, bias, output_shape, info, dilation, 1 /*num_groups*/);
+ }
+ DataLayout _data_layout{};
+ DataType _data_type{};
+
+protected:
+ std::unique_ptr<ConvolutionFunction> conv{};
+ arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
+ TensorClass _target{};
+ SimpleTensor<ScalarType> _reference{};
+};
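+
+// The two fixtures below differ only in how the convolution is driven:
+// VariableWeightsFixture configures on TensorInfo objects and runs through an
+// ITensorPack (operator-style interface), while VariableWeightsFixtureNEInterface
+// configures and runs on the tensors themselves (function-style interface).
+// Both refill and rearrange the weights before each of the two runs to
+// exercise the variable-weights path.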
+
+template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
+class VariableWeightsFixture : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
+{
+ void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
+ const PadStrideInfo &conv_info,
+ const Size2D &dilation)
+ {
+ this->conv->configure(&src_tensor_info, &weight_tensor_info, &bias_tensor_info, &dst_tensor_info, conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);
+
+ // Allocate input tensors
+ auto src = create_tensor<TensorClass>(src_tensor_info);
+ auto weights_original = create_tensor<TensorClass>(weight_tensor_info);
+ const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format);
+ auto weights_transformed = create_tensor<TensorClass>(new_tensor_info);
+ auto bias = create_tensor<TensorClass>(bias_tensor_info);
+ src.allocator()->allocate();
+ weights_original.allocator()->allocate();
+ weights_transformed.allocator()->allocate();
+ bias.allocator()->allocate();
+ // Allocate destination tensor
+ this->_target = create_tensor<TensorClass>(dst_tensor_info);
+ this->_target.allocator()->allocate();
+
+        // Fill the source and bias tensors; they are left unchanged across the two runs.
+ this->fill(AccessorType(src), 0);
+ this->fill(AccessorType(bias), 1);
+
+ // First run
+ this->fill(AccessorType(weights_original), 2);
+ rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
+ ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weights_transformed }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &(this->_target) } };
+ this->conv->run(run_pack);
+ // Second run, with new weights
+ this->fill(AccessorType(weights_original), 3);
+ rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
+ this->conv->run(run_pack);
+ src.allocator()->free();
+ weights_original.allocator()->free();
+ weights_transformed.allocator()->free();
+ bias.allocator()->free();
+ }
+};
+
+template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
+class VariableWeightsFixtureNEInterface : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
+{
+ void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
+ const PadStrideInfo &conv_info,
+ const Size2D &dilation)
+ {
+ // Allocate input tensors
+ auto src = create_tensor<TensorClass>(src_tensor_info);
+ auto weights_original = create_tensor<TensorClass>(weight_tensor_info);
+ const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format);
+ auto weights_transformed = create_tensor<TensorClass>(new_tensor_info);
+ auto bias = create_tensor<TensorClass>(bias_tensor_info);
+ src.allocator()->allocate();
+ weights_original.allocator()->allocate();
+ weights_transformed.allocator()->allocate();
+ bias.allocator()->allocate();
+ // Allocate destination tensor
+ this->_target = create_tensor<TensorClass>(dst_tensor_info);
+ this->_target.allocator()->allocate();
+ this->conv->configure(&src, &weights_transformed, &bias, &(this->_target), conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);
+        // Fill the source and bias tensors; they are left unchanged across the two runs.
+ this->fill(AccessorType(src), 0);
+ this->fill(AccessorType(bias), 1);
+
+ // First run
+ this->fill(AccessorType(weights_original), 2);
+ rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
+ this->conv->run();
+ // Second run, with new weights
+ this->fill(AccessorType(weights_original), 3);
+ rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
+ this->conv->run();
+ src.allocator()->free();
+ weights_original.allocator()->free();
+ weights_transformed.allocator()->free();
+ bias.allocator()->free();
+ }
+};
+
+template <typename ConvolutionClass, bool enable_fast_math>
+class HasOptImplFixture : public framework::Fixture
+{
+public:
+ void setup(DataType data_type, arm_compute::WeightFormat query_weight_format)
+ {
+ auto conv = std::make_unique<ConvolutionClass>();
+ const auto src_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
+ const auto weight_info = TensorInfo(TensorShape(64, 3U, 3U, 64U), 1, enable_fast_math ? DataType::BFLOAT16 : data_type, DataLayout::NHWC);
+ const auto bias_info = TensorInfo(TensorShape(64U), 1, data_type, DataLayout::NHWC);
+ auto dst_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
+ const auto conv_info = PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR);
+ const WeightsInfo weights_info(false, 3U, 3U, 64U, false, query_weight_format);
+ _kernel_found = bool(ConvolutionClass::has_opt_impl(_computed_weight_format, &src_info, &weight_info,
+ &bias_info, &dst_info, conv_info, weights_info,
+ Size2D(1U, 1U) /*dilation*/, ActivationLayerInfo() /*act_info*/, enable_fast_math));
+ }
+
+protected:
+ bool _kernel_found{ false };
+ arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
+};
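+
+// Usage sketch (illustrative, not part of the patch): a test case could
+// instantiate HasOptImplFixture<NEGEMMConvolutionLayer, false>, call
+// setup(DataType::F32, arm_compute::WeightFormat::OHWIo4), and then check
+// that _kernel_found is set and _computed_weight_format matches the query.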
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/CopyFixture.h b/tests/validation/fixtures/CopyFixture.h
index 534d5b3145..f5e711a500 100644
--- a/tests/validation/fixtures/CopyFixture.h
+++ b/tests/validation/fixtures/CopyFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class CopyFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type)
{
_target = compute_target(input_shape, output_shape, data_type);
@@ -61,7 +60,7 @@ protected:
TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
{
// Check if indeed the input shape can be reshape to the output one
- ARM_COMPUTE_EXPECT(input_shape.total_size() == output_shape.total_size(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input_shape.total_size() == output_shape.total_size());
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, data_type);
@@ -72,15 +71,15 @@ protected:
copy.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
diff --git a/tests/validation/fixtures/CropResizeFixture.h b/tests/validation/fixtures/CropResizeFixture.h
index 450c68e0e9..30a3fd8569 100644
--- a/tests/validation/fixtures/CropResizeFixture.h
+++ b/tests/validation/fixtures/CropResizeFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,6 @@
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
-#include "tests/RawLutAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
@@ -47,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class CropResizeFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape src_shape, TensorShape boxes_shape, Coordinates2D crop_size, InterpolationPolicy method,
float extrapolation_value, bool is_outside_bounds, DataType data_type)
{
@@ -88,15 +86,15 @@ protected:
FunctionType crop;
crop.configure(&src, &boxes, &boxes_ind, &dst, crop_size, method, extrapolation_value);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 57951c0f36..83170c413c 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,22 +42,24 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class DeconvolutionLayerFixtureBase : public framework::Fixture
{
public:
using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type;
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
- DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
+ DataType data_type, DataType weights_data_type, DataLayout data_layout,
+ QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, QuantizationInfo weights_quantization_info, bool add_bias)
{
- _data_type = data_type;
- _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _data_layout = data_layout;
- _input_quantization_info = input_quantization_info;
- _output_quantization_info = output_quantization_info;
+ _data_type = data_type;
+ _weights_data_type = weights_data_type;
+ _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+ _data_layout = data_layout;
+ _input_quantization_info = input_quantization_info;
+ _output_quantization_info = output_quantization_info;
+ _weights_quantization_info = weights_quantization_info;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, add_bias);
_reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, add_bias);
@@ -72,14 +74,34 @@ protected:
case DataType::QASYMM8:
{
std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, i);
break;
}
case DataType::QASYMM8_SIGNED:
{
std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::QSYMM8_PER_CHANNEL:
+ {
+ int min_bound = 128;
+ int max_bound = -127;
+ for(size_t i = 0; i < _input_quantization_info.scale().size(); i++)
+ {
+ std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ if(bounds.first < min_bound)
+ {
+ min_bound = bounds.first;
+ }
+ if(bounds.second > max_bound)
+ {
+ max_bound = bounds.second;
+ }
+ }
+ std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
library->fill(tensor, distribution, i);
break;
}
@@ -90,9 +112,14 @@ protected:
break;
}
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -108,8 +135,7 @@ protected:
{
case DataType::S32:
{
- const int32_t value = static_cast<int32_t>(tensor.quantization_info().uniform().offset);
- library->fill_tensor_value(tensor, value);
+ library->fill_tensor_value(tensor, 0);
break;
}
case DataType::F16:
@@ -135,7 +161,7 @@ protected:
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _input_quantization_info, _data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, _input_quantization_info, _data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout);
TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _input_quantization_info, _data_layout);
TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _output_quantization_info, _data_layout);
@@ -143,13 +169,13 @@ protected:
FunctionType conv;
conv.configure(&src, &weights, add_bias ? &bias : nullptr, &dst, info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
if(add_bias)
{
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
}
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -160,13 +186,13 @@ protected:
}
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
if(add_bias)
{
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
}
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
@@ -178,7 +204,6 @@ protected:
// Compute DeconvolutionLayer function
conv.run();
-
return dst;
}
@@ -187,7 +212,7 @@ protected:
{
// Create reference
SimpleTensor<T> src{ input_shape, _data_type, 1, _input_quantization_info };
- SimpleTensor<T> weights{ weights_shape, _data_type, 1, _input_quantization_info };
+ SimpleTensor<TW> weights{ weights_shape, _weights_data_type, 1, _weights_quantization_info };
SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _input_quantization_info };
// Fill reference
@@ -202,28 +227,27 @@ protected:
{
fill_zeros(bias);
}
-
- return reference::deconvolution_layer<T>(src, weights, bias, output_shape, info, _output_quantization_info);
+ return reference::deconvolution_layer<T, TW>(src, weights, bias, output_shape, info, _output_quantization_info);
}
TensorType _target{};
SimpleTensor<T> _reference{};
DataType _data_type{};
+ DataType _weights_data_type{};
DataType _bias_data_type{};
DataLayout _data_layout{};
QuantizationInfo _input_quantization_info{};
QuantizationInfo _output_quantization_info{};
+ QuantizationInfo _weights_quantization_info{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
-class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
+class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -231,20 +255,18 @@ public:
TensorInfo input_info(input_shape, 1, data_type);
TensorInfo weights_info(weights_shape, 1, data_type);
TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
- DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(),
- QuantizationInfo(), add_bias);
+ DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(),
+ QuantizationInfo(), QuantizationInfo(), add_bias);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
-class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
+class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top,
unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL);
@@ -252,20 +274,18 @@ public:
TensorInfo input_info(input_shape, 1, data_type);
TensorInfo weights_info(weights_shape, 1, data_type);
TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
- DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(),
- QuantizationInfo(), add_bias);
+ DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(),
+ QuantizationInfo(), QuantizationInfo(), add_bias);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
-class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
+class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -273,8 +293,38 @@ public:
TensorInfo input_info(input_shape, 1, data_type, input_quantization_info);
TensorInfo weights_info(weights_shape, 1, data_type, input_quantization_info);
TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
- DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, input_quantization_info,
- output_quantization_info, add_bias);
+ DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout,
+ input_quantization_info,
+ output_quantization_info, input_quantization_info, add_bias);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, unsigned int kernel_size_x, unsigned int kernel_size_y>
+class DeconvolutionValidationQuantizedPerChannelFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>
+{
+public:
+ void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
+ unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias,
+ DataType weights_data_type)
+ {
+ const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
+ const TensorShape bias_shape(num_kernels);
+ const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
+ auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
+ TensorInfo input_info(input_shape, 1, data_type, input_quantization_info);
+ TensorInfo weights_info(weights_shape, 1, weights_data_type, input_quantization_info);
+ TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
+
+ std::vector<float> weights_scales{};
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> dis(0.01f, 1.f);
+ for(size_t i = 0; i < output_shape[2]; ++i)
+ {
+ weights_scales.push_back(dis(gen));
+ }
+ DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, weights_data_type, data_layout,
+ input_quantization_info,
+ output_quantization_info, QuantizationInfo(weights_scales), add_bias);
}
};
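+
+// Note on the per-channel fixture above: one scale per output channel
+// (output_shape[2] == num_kernels) is drawn from U(0.01, 1) and no offsets are
+// set, matching the symmetric semantics of QSYMM8_PER_CHANNEL weights.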
diff --git a/tests/validation/fixtures/DepthConvertLayerFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h
index 3fe12709c8..f55d20bf3e 100644
--- a/tests/validation/fixtures/DepthConvertLayerFixture.h
+++ b/tests/validation/fixtures/DepthConvertLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DepthConvertLayerValidationBaseFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info)
{
_shift = shift;
@@ -61,14 +60,14 @@ protected:
if(is_data_type_quantized(tensor.data_type()))
{
std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, i);
}
else
{
- // When converting S32 to F16, both reference and NEON implementations are + or - infinity outside the F16 range.
- if(dt_in==DataType::S32 && dt_out==DataType::F16)
+            // When converting S32 to F16, both the reference and the Compute Library implementations produce + or - infinity outside the F16 range.
+ if(dt_in == DataType::S32 && dt_out == DataType::F16)
{
std::uniform_int_distribution<int32_t> distribution_s32(-65504, 65504);
library->fill(tensor, distribution_s32, i);
@@ -90,15 +89,15 @@ protected:
FunctionType depth_convert;
depth_convert.configure(&src, &dst, policy, shift);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0, dt_in, dt_out);
@@ -130,7 +129,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DepthConvertLayerValidationFixture : public DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy,
@@ -142,7 +140,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DepthConvertLayerValidationQuantizedFixture : public DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info)
{
DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy,
diff --git a/tests/validation/fixtures/DepthToSpaceLayerFixture.h b/tests/validation/fixtures/DepthToSpaceLayerFixture.h
index b42bed52a7..abe3d8b22f 100644
--- a/tests/validation/fixtures/DepthToSpaceLayerFixture.h
+++ b/tests/validation/fixtures/DepthToSpaceLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DepthToSpaceLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, int32_t block_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
{
_target = compute_target(input_shape, block_shape, output_shape, data_type, data_layout);
@@ -50,7 +49,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
TensorType compute_target(TensorShape input_shape, int32_t block_shape, TensorShape output_shape,
@@ -70,15 +72,15 @@ protected:
FunctionType depth_to_space;
depth_to_space.configure(&input, &output, block_shape);
- ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
// Allocate tensors
input.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
fill(AccessorType(input), 0);
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 7016e9fb68..6e2e3a3846 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE
-#define ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DEPTHWISECONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DEPTHWISECONVOLUTIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -38,6 +38,7 @@
#include "utils/Utils.h"
+#include <cstdint>
#include <random>
namespace arm_compute
@@ -54,31 +55,212 @@ class DepthwiseConvolutionLayerValidationGenericFixture : public framework::Fixt
public:
using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;
+ void setup_quantization(TensorShape input_shape, TensorShape weights_shape, QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(input_shape);
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ _input_quantization_info = QuantizationInfo(scale_lhs, offset_lhs);
+ _weights_quantization_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
+                                                                   weights_shape.y() /* height */, weights_shape.x() /* width */, 1 /* channels */,
+ data_type, 0.5f /* bias_fraction */);
+
+ _output_quantization_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+ }
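+
+    // Note: scale_lhs and scale_rhs above are 2^x with x drawn uniformly from
+    // [-5, 3], i.e. scales in [2^-5, 2^3], while the offsets span the full
+    // value range of T. The hint's bias bounds (_min_bias/_max_bias) are used
+    // later when filling the S32 bias tensor.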
+
public:
- template <typename...>
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation,
unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false, bool in_place = false, bool run_twice = false)
{
- const DataType bias_data_type = is_data_type_quantized(input_data_type) ? DataType::S32 : input_data_type;
+ ARM_COMPUTE_ERROR_ON(mixed_layout && in_place);
+        // This hash is used to seed the random generators. Hash collisions may
+        // occur, but this is intentional: it is an easy way to make the random
+        // generation differ across most test configurations, which previously
+        // all used the same set of values.
+ _hash = in_shape[0] + in_shape[1] + in_shape[2] + in_shape[3] +
+ kernel_size.width + kernel_size.height + dilation.x() +
+ dilation.y() + pad_stride_info.pad_bottom() + pad_stride_info.pad_left() + pad_stride_info.pad_right() + pad_stride_info.pad_top();
+
+ _mixed_layout = mixed_layout;
+ _input_shape = in_shape;
+ _input_data_type = input_data_type;
+ _weights_data_type = weights_data_type;
+ _data_layout = data_layout;
+ _pad_stride_info = pad_stride_info;
+ _act_info = act_info;
+ _depth_multiplier = depth_multiplier;
+ _dilation = dilation;
+ _in_place = in_place;
+ _run_twice = run_twice;
+
+ _bias_data_type = is_data_type_quantized(_input_data_type) ? DataType::S32 : _input_data_type;
+
+ _weights_shape = TensorShape(kernel_size.width, kernel_size.height);
+
+ const TensorInfo in_info(_input_shape, 1, _input_data_type);
+ const TensorInfo we_info(_weights_shape, 1, _weights_data_type);
+ const ConvolutionInfo info{ _pad_stride_info, _depth_multiplier, _act_info, _dilation };
+ _output_shape = compute_depthwise_convolution_shape(in_info, we_info, info);
+
+ _weights_shape.set(2, _output_shape.z());
+ _biases_shape = TensorShape(_weights_shape[2]);
+
+ _input_quantization_info = input_quantization_info;
+ _weights_quantization_info = weights_quantization_info;
+ _output_quantization_info = output_quantization_info;
+
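+        // Dynamic output quantization is only enabled when the output range
+        // can be estimated reliably: quantized input, weights that are not
+        // symmetrically quantized, and no activation other than identity.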
+ if(is_data_type_quantized(_input_data_type) && !is_data_type_quantized_symmetric(weights_data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(in_shape, _weights_shape, _input_quantization_info, _weights_quantization_info, _input_data_type);
+ _use_dynamic_output_quant = true;
+ }
+ }
- TensorShape weights_shape(kernel_size.width, kernel_size.height);
+ void configure_target()
+ {
+ TensorShape input_shape = _input_shape;
+ TensorShape weights_shape = _weights_shape;
+ TensorShape output_shape = _output_shape;
- const TensorInfo in_info(in_shape, 1, input_data_type);
- const TensorInfo we_info(weights_shape, 1, weights_data_type);
- const TensorShape out_shape = compute_depthwise_convolution_shape(in_info, we_info, pad_stride_info, depth_multiplier, dilation);
+ if(_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ }
- weights_shape.set(2, out_shape.z());
- const TensorShape biases_shape(weights_shape[2]);
+ // Create tensors
+ _src = create_tensor<TensorType>(input_shape, _input_data_type, 1, _input_quantization_info, _data_layout);
+ _weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout);
+ if(_run_twice) {
+ _weights.info()->set_are_values_constant(false);
+ }
+ _biases = create_tensor<TensorType>(_biases_shape, _bias_data_type, 1, _input_quantization_info, _data_layout);
+ TensorType *target_to_use = nullptr;
+ if(!_in_place)
+ {
+ _target = create_tensor<TensorType>(output_shape, _input_data_type, 1, _output_quantization_info, _data_layout);
+ target_to_use = &_target;
+ }
+
+ add_padding_x({ &_src, &_biases }, _data_layout);
+ add_padding_x({ &_weights }, _data_layout, true);
+ if(!_in_place)
+ {
+ add_padding_x({ &_target }, _data_layout);
+ }
+
+ // Create Depthwise Convolution configure function
+ _dwc.configure(&_src, &_weights, &_biases, target_to_use, _pad_stride_info, _depth_multiplier, _act_info, _dilation);
- _target = compute_target(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier,
- input_data_type, weights_data_type, bias_data_type, input_quantization_info, weights_quantization_info, output_quantization_info, data_layout, act_info);
- _reference = compute_reference(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier,
- input_data_type, weights_data_type, bias_data_type, input_quantization_info, weights_quantization_info, output_quantization_info, act_info);
+ ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
+ }
+
+ void allocate_and_run_target()
+ {
+ // Allocate tensors
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ _biases.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
+
+ if(!_in_place)
+ {
+ _target.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
+ }
+
+ // Fill tensors
+ fill(AccessorType(_src), 0 + _hash);
+ fill(AccessorType(_weights), 1 + _hash);
+ fill(AccessorType(_biases), 2 + _hash);
+
+ // Run with variable input
+ if(_run_twice) {
+ _dwc.run();
+
+ // Fill tensors with a new seed
+ fill(AccessorType(_src), 3 + _hash);
+ fill(AccessorType(_weights), 4 + _hash);
+ fill(AccessorType(_biases), 5 + _hash);
+ }
+
+ if(_mixed_layout)
+ {
+ mix_layout(_dwc, _src, _target);
+ }
+ else
+ {
+ // Compute function
+ _dwc.run();
+ }
+ }
+
+ void compute_reference()
+ {
+ SimpleTensor<T> src{ _input_shape, _input_data_type, 1, _input_quantization_info };
+ SimpleTensor<TW> weights{ _weights_shape, _weights_data_type, 1, _weights_quantization_info };
+ SimpleTensor<TBias> biases{ _biases_shape, _bias_data_type, 1, _input_quantization_info };
+
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(biases, 2 + _hash);
+
+ if(_run_twice) {
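+            // The result of this first reference run is discarded on purpose;
+            // it only mirrors the first execution of the target function.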
+ SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier, _dilation, _output_quantization_info);
+ if(_act_info.enabled()) {
+ reference::activation_layer<T>(depth_out, _act_info);
+ }
+
+ fill(src, 3 + _hash);
+ fill(weights, 4 + _hash);
+ fill(biases, 5 + _hash);
+ }
+
+ SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier, _dilation, _output_quantization_info);
+ _reference = (_act_info.enabled()) ? reference::activation_layer<T>(depth_out, _act_info) : depth_out;
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ ARM_COMPUTE_ERROR_ON(_in_place);
+        // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstating original data layout for the test suite to properly check the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -86,27 +268,77 @@ protected:
{
case DataType::QASYMM8:
{
- std::uniform_int_distribution<uint8_t> distribution(0, 10);
- library->fill(tensor, distribution, i);
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(0, 255);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
break;
}
case DataType::QASYMM8_SIGNED:
+ {
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(-128, 127);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ }
case DataType::QSYMM8_PER_CHANNEL:
{
- std::uniform_int_distribution<int8_t> distribution(-10, 10);
+ int min_bound = 128;
+ int max_bound = -127;
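+            // Bounds start inverted so the first channel always updates both
+            // limits.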
+ for(size_t i = 0; i < _weights_quantization_info.scale().size(); i++)
+ {
+ std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
+ if(bounds.first < min_bound)
+ {
+ min_bound = bounds.first;
+ }
+ if(bounds.second > max_bound)
+ {
+ max_bound = bounds.second;
+ }
+ }
+ std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::BFLOAT16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
library->fill(tensor, distribution, i);
break;
}
- case DataType::F32:
case DataType::F16:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
library->fill(tensor, distribution, i);
break;
}
- case DataType::S32:
+ case DataType::F32:
{
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -115,88 +347,56 @@ protected:
}
}
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape output_shape, PadStrideInfo &pad_stride_info, Size2D dilation,
- unsigned int depth_multiplier, const DataType input_data_type, const DataType weights_data_type, const DataType bias_data_type,
- const QuantizationInfo &input_quantization_info, const QuantizationInfo &weights_quantization_info, const QuantizationInfo &output_quantization_info,
- const DataLayout data_layout, const ActivationLayerInfo &act_info)
- {
- if(data_layout == DataLayout::NHWC)
- {
- permute(input_shape, PermutationVector(2U, 0U, 1U));
- permute(weights_shape, PermutationVector(2U, 0U, 1U));
- permute(output_shape, PermutationVector(2U, 0U, 1U));
- }
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, input_data_type, 1, input_quantization_info, data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, weights_data_type, 1, weights_quantization_info, data_layout);
- TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, input_quantization_info, data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, input_data_type, 1, output_quantization_info, data_layout);
-
- // Create Depthwise Convolution configure function
- FunctionType dwc;
- dwc.configure(&src, &weights, &biases, &dst, pad_stride_info, depth_multiplier, act_info, dilation);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(biases.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- biases.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!biases.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(biases), 2);
-
- // Compute function
- dwc.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &in_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const TensorShape &out_shape,
- const PadStrideInfo &pad_stride_info, const Size2D &dilation, unsigned int depth_multiplier,
- const DataType input_data_type, const DataType weights_data_type, const DataType bias_data_type,
- const QuantizationInfo &input_quantization_info, const QuantizationInfo &weights_quantization_info, const QuantizationInfo &output_quantization_info,
- const ActivationLayerInfo &act_info)
- {
- SimpleTensor<T> src{ in_shape, input_data_type, 1, input_quantization_info };
- SimpleTensor<TW> weights{ weights_shape, weights_data_type, 1, weights_quantization_info };
- SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, input_quantization_info };
-
- fill(src, 0);
- fill(weights, 1);
- fill(biases, 2);
-
- SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, out_shape, pad_stride_info, depth_multiplier, dilation, output_quantization_info);
- return (act_info.enabled()) ? reference::activation_layer<T>(depth_out, act_info) : depth_out;
- }
-
TensorType _target{};
SimpleTensor<T> _reference{};
+
+ TensorType _src{};
+ TensorType _weights{};
+ TensorType _biases{};
+ FunctionType _dwc{};
+
+ TensorShape _input_shape{};
+ TensorShape _weights_shape{};
+ TensorShape _biases_shape{};
+ TensorShape _output_shape{};
+ DataType _input_data_type{};
+ DataType _weights_data_type{};
+ DataType _bias_data_type{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _weights_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
+ DataLayout _data_layout{};
+ PadStrideInfo _pad_stride_info{};
+ ActivationLayerInfo _act_info{};
+ unsigned int _depth_multiplier{};
+ Size2D _dilation{};
+ bool _mixed_layout{ false };
+ bool _in_place{ false };
+ bool _run_twice{ false };
+ bool _use_dynamic_output_quant{false};
+
+ int32_t _hash{0};
+    // Random initialization limits
+    // The default values below are the previously handcrafted limits,
+    // used when dynamic quantization is not in effect.
+ int32_t _min_bias{-100};
+ int32_t _max_bias{100};
+ int32_t _min_u8{0};
+ int32_t _max_u8{50};
+ int32_t _min_s8{-25};
+ int32_t _max_s8{25};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false, bool in_place = false, bool run_twice = false>
class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, DataLayout data_layout,
ActivationLayerInfo act_info)
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
data_type, data_type, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(),
- data_layout, act_info);
+ data_layout, act_info, mixed_layout, in_place, run_twice);
}
};
@@ -204,260 +404,362 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DepthwiseConvolutionLayerNativeValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type,
DataLayout data_layout)
{
- const TensorShape src_shape(width, height, channel, batch);
- const TensorShape weights_shape(kernel_size.width, kernel_size.height, channel * depth_multiplier);
- const TensorShape biases_shape(weights_shape.z());
+ _dilation = dilation;
+ _depth_multiplier = depth_multiplier;
+ _data_type = data_type;
+ _data_layout = data_layout;
+
+ _input_shape = TensorShape(width, height, channel, batch);
+ _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
+ _biases_shape = TensorShape(_weights_shape.z());
- PadStrideInfo conv_info;
if(padding_valid)
{
- conv_info = PadStrideInfo();
+ _conv_info = PadStrideInfo(stride.width, stride.height);
}
else
{
- conv_info = calculate_same_pad(src_shape, weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, dilation);
+ _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation);
}
-
- _target = compute_target(src_shape, weights_shape, biases_shape, conv_info, dilation, depth_multiplier, data_type, data_layout);
- _reference = compute_reference(src_shape, weights_shape, biases_shape, conv_info, dilation, depth_multiplier, data_type);
}
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
+ void configure_target()
{
- switch(tensor.data_type())
- {
- case DataType::F32:
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- library->fill_tensor_uniform(tensor, i);
- }
- }
+ TensorShape input_shape = _input_shape;
+ TensorShape weights_shape = _weights_shape;
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, PadStrideInfo &conv_info, Size2D dilation,
- unsigned int depth_multiplier, const DataType data_type, const DataLayout data_layout)
- {
- if(data_layout == DataLayout::NHWC)
+ if(_data_layout == DataLayout::NHWC)
{
permute(input_shape, PermutationVector(2U, 0U, 1U));
permute(weights_shape, PermutationVector(2U, 0U, 1U));
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType biases = create_tensor<TensorType>(biases_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(TensorShape(), data_type, 1, QuantizationInfo(), data_layout);
+ _src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout);
- // Create Depthwise Convolution configure function
- FunctionType dwc;
- dwc.configure(&src, &weights, &biases, &dst, conv_info, depth_multiplier, dilation);
+ add_padding_x({ &_src, &_biases, &_target }, _data_layout);
+ add_padding_x({ &_weights }, _data_layout, true);
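+ // Assumption: the trailing 'true' requests right-side-only padding on the weights,
+ // mirroring the cl_image constraint noted in the configurable fixture below
+ // ("Don't add left padding if cl image will be used").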
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(biases.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // Create Depthwise Convolution configure function
+ const ConvolutionInfo info
+ {
+ _conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation
+ };
+ _dwc.configure(_src.info(), _weights.info(), _biases.info(), _target.info(), info);
+
+ ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
+ }
+ void allocate_and_run_target()
+ {
// Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- biases.allocator()->allocate();
- dst.allocator()->allocate();
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ _biases.allocator()->allocate();
+ _target.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!biases.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(biases), 2);
+ fill(AccessorType(_src), 0);
+ fill(AccessorType(_weights), 1);
+ fill(AccessorType(_biases), 2);
- // Compute function
- dwc.run();
+ arm_compute::ITensorPack pack;
+ pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_0, &_src);
+ pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &_weights);
+ pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_2, &_biases);
+ pack.add_tensor(arm_compute::TensorType::ACL_DST, &_target);
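+ // configure() above was called on TensorInfo objects only, so the concrete
+ // tensors are bound late, at run() time, through this tensor pack.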
- return dst;
+ // Compute function
+ _dwc.run(pack);
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const PadStrideInfo &conv_info,
- const Size2D &dilation, unsigned int depth_multiplier, const DataType data_type)
+ void compute_reference()
{
- SimpleTensor<T> src{ input_shape, data_type };
- SimpleTensor<T> weights{ weights_shape, data_type };
- SimpleTensor<T> biases{ biases_shape, data_type };
+ SimpleTensor<T> src{ _input_shape, _data_type };
+ SimpleTensor<T> weights{ _weights_shape, _data_type };
+ SimpleTensor<T> biases{ _biases_shape, _data_type };
fill(src, 0);
fill(weights, 1);
fill(biases, 2);
- const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(input_shape, 1, data_type), TensorInfo(weights_shape, 1, data_type), conv_info,
- depth_multiplier, dilation);
- return reference::depthwise_convolution(src, weights, biases, dst_shape, conv_info, depth_multiplier, dilation);
+ const ConvolutionInfo info{ _conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation };
+ const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
+ _reference = reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
}
TensorType _target{};
SimpleTensor<T> _reference{};
+
+ TensorType _src{};
+ TensorType _weights{};
+ TensorType _biases{};
+ FunctionType _dwc{};
+
+ TensorShape _input_shape{};
+ TensorShape _weights_shape{};
+ TensorShape _biases_shape{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ PadStrideInfo _conv_info{};
+ Size2D _dilation{};
+ unsigned int _depth_multiplier{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool in_place = false>
class DepthwiseConvolutionLayerNativeConfigurableValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type,
- DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0)
+ DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0, bool export_to_cl_image)
{
- const TensorShape src_shape(width, height, channel, batch);
- const TensorShape weights_shape(kernel_size.width, kernel_size.height, channel * depth_multiplier);
- const TensorShape biases_shape(weights_shape.z());
+ _dilation = dilation;
+ _depth_multiplier = depth_multiplier;
+ _data_type = data_type;
+ _data_layout = data_layout;
+ _act_info = act_info;
+ _n0 = n0;
+ _export_to_cl_image = export_to_cl_image;
+ _in_place = in_place;
+
+ _input_shape = TensorShape(width, height, channel, batch);
+ _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
+ _biases_shape = TensorShape(_weights_shape.z());
- PadStrideInfo conv_info;
if(padding_valid)
{
- conv_info = PadStrideInfo();
+ _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation);
}
else
{
- conv_info = calculate_same_pad(src_shape, weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, dilation);
+ _conv_info = PadStrideInfo(stride.width, stride.height);
}
-
- _target = compute_target(src_shape, weights_shape, biases_shape, conv_info, dilation, depth_multiplier, data_type, data_layout, act_info, n0);
- _reference = compute_reference(src_shape, weights_shape, biases_shape, conv_info, dilation, depth_multiplier, data_type, act_info);
}
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
+ void configure_target()
{
- switch(tensor.data_type())
+#if defined(ARM_COMPUTE_OPENCL_ENABLED)
+ if(_export_to_cl_image)
{
- case DataType::F32:
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::F16:
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- library->fill_tensor_uniform(tensor, i);
+ _validate_output &= image2d_from_buffer_supported(CLKernelLibrary::get().get_device());
+ _validate_output &= (get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) != 0);
}
- }
+#endif // ARM_COMPUTE_OPENCL_ENABLED
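+ // If the device cannot back a buffer with an image2d, or reports a zero cl_image
+ // pitch alignment, the fixture degrades to a no-op: configure_target(),
+ // allocate_and_run_target() and compute_reference() all early-return below.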
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, PadStrideInfo &conv_info, Size2D dilation,
- unsigned int depth_multiplier, const DataType data_type, const DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0)
- {
- if(data_layout == DataLayout::NHWC)
+ if(!_validate_output)
+ {
+ return;
+ }
+
+ TensorShape input_shape = _input_shape;
+ TensorShape weights_shape = _weights_shape;
+
+ if(_data_layout == DataLayout::NHWC)
{
permute(input_shape, PermutationVector(2U, 0U, 1U));
permute(weights_shape, PermutationVector(2U, 0U, 1U));
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType biases = create_tensor<TensorType>(biases_shape, data_type, 1, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(TensorShape(), data_type, 1, QuantizationInfo(), data_layout);
+ _src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType *target_to_use = nullptr;
+ if(!_in_place)
+ {
+ _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout);
+ target_to_use = &_target;
+ }
- DWCWeightsKernelInfo dwc_weights_info;
- dwc_weights_info.n0 = n0;
+ DWCComputeKernelInfo dwc_info;
+ dwc_info.n0 = _n0;
+ dwc_info.m0 = _conv_info.stride().first == 1 && _dilation.x() == 1 ? 8 : 1;
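+ // Assumption: m0 is the number of output columns handled per work-item; unrolling
+ // to 8 is only valid when stride_x == 1 and dilation_x == 1, since only then do
+ // neighbouring outputs read contiguous input columns.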
+ dwc_info.export_input_to_cl_image = false;
+ dwc_info.export_weights_to_cl_image = _export_to_cl_image;
- DWCKernelInfo dwc_info;
- dwc_info.activation_info = act_info;
+ const ConvolutionInfo conv_kernel_info
+ {
+ _conv_info, _depth_multiplier, _act_info, _dilation
+ };
+
+ add_padding_x({ &_src, &_biases, &_target }, _data_layout);
+ add_padding_x({ &_weights }, _data_layout, _export_to_cl_image); // Don't add left padding if cl image will be used
// Create Depthwise Convolution configure function
- FunctionType dwc;
- dwc.configure(&src, &weights, &biases, &dst, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation);
+ _dwc.configure(&_src, &_weights, &_biases, target_to_use, dwc_info, conv_kernel_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(biases.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
+ }
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- biases.allocator()->allocate();
- dst.allocator()->allocate();
+ void allocate_and_run_target()
+ {
+ if(!_validate_output)
+ {
+ return;
+ }
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!biases.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // Allocate tensors
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ _biases.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
+ if(!_in_place)
+ {
+ _target.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
+ }
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(biases), 2);
+ fill(AccessorType(_src), 0);
+ fill(AccessorType(_weights), 1);
+ fill(AccessorType(_biases), 2);
+
+ // Test multi data layout graph cases, where the data layout changes after configure()
+ _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ if(!_in_place)
+ {
+ _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ }
// Compute function
- dwc.run();
+ _dwc.run();
- return dst;
+ // Reinstating original data layout for the test suite to properly check the values
+ if(!_in_place)
+ {
+ _target.info()->set_data_layout(_data_layout);
+ }
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const PadStrideInfo &conv_info,
- const Size2D &dilation, unsigned int depth_multiplier, const DataType data_type, const ActivationLayerInfo &act_info)
+ void compute_reference()
{
- SimpleTensor<T> src{ input_shape, data_type };
- SimpleTensor<T> weights{ weights_shape, data_type };
- SimpleTensor<T> biases{ biases_shape, data_type };
+ if(!_validate_output)
+ {
+ return;
+ }
+
+ SimpleTensor<T> src{ _input_shape, _data_type };
+ SimpleTensor<T> weights{ _weights_shape, _data_type };
+ SimpleTensor<T> biases{ _biases_shape, _data_type };
fill(src, 0);
fill(weights, 1);
fill(biases, 2);
- const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(input_shape, 1, data_type), TensorInfo(weights_shape, 1, data_type), conv_info,
- depth_multiplier, dilation);
- return reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, conv_info, depth_multiplier, dilation), act_info);
+ const ConvolutionInfo info{ _conv_info, _depth_multiplier, _act_info, _dilation };
+ const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
+ _reference = reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation), _act_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
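+ // std::uniform_real_distribution is only defined for float, double and long double,
+ // so a dedicated 16-bit helper is used to generate half-precision values safely.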
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
}
TensorType _target{};
SimpleTensor<T> _reference{};
+
+ TensorType _src{};
+ TensorType _weights{};
+ TensorType _biases{};
+ FunctionType _dwc{};
+
+ TensorShape _input_shape{};
+ TensorShape _weights_shape{};
+ TensorShape _biases_shape{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ PadStrideInfo _conv_info{};
+ ActivationLayerInfo _act_info{};
+ Size2D _dilation{};
+ unsigned int _depth_multiplier{};
+ unsigned int _n0{};
+ bool _export_to_cl_image{};
+ bool _validate_output{ true };
+ bool _in_place{ false };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false, bool in_place = false>
class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type,
QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type,
data_type, input_quantization_info, input_quantization_info, output_quantization_info,
- data_layout, act_info);
+ data_layout, act_info, mixed_layout, in_place);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, bool in_place = false>
class DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
{
public:
- template <typename...>
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
{
const float out_scale = output_quantization_info.uniform().scale;
const float in_scale = input_quantization_info.uniform().scale;
- std::vector<float> weights_scales{};
- std::mt19937 gen(library->seed());
- std::uniform_real_distribution<> dis(0.01f, out_scale / in_scale);
+ std::vector<float> weights_scales{};
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> dis(0.01f, out_scale / in_scale);
for(size_t i = 0; i < in_shape.z() * depth_multiplier; ++i)
{
weights_scales.push_back(dis(gen));
@@ -466,10 +768,10 @@ public:
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
input_data_type, weights_data_type,
input_quantization_info, QuantizationInfo(weights_scales), output_quantization_info,
- data_layout, act_info);
+ data_layout, act_info, false, in_place);
}
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DEPTHWISECONVOLUTIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 3699613a39..4eb25a5bc5 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DequantizationValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType src_data_type, DataType dst_datatype, DataLayout data_layout)
{
_quantization_info = generate_quantization_info(src_data_type, shape.z());
@@ -77,15 +76,15 @@ protected:
FunctionType dequantization_layer;
dequantization_layer.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/DerivativeFixture.h b/tests/validation/fixtures/DerivativeFixture.h
deleted file mode 100644
index 2df3340e41..0000000000
--- a/tests/validation/fixtures/DerivativeFixture.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_DERIVATIVE_FIXTURE
-#define ARM_COMPUTE_TEST_DERIVATIVE_FIXTURE
-
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/Types.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Derivative.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename U>
-class DerivativeValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, BorderMode border_mode, Format format, GradientDimension gradient_dimension)
- {
- // Generate a random constant value
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> int_dist(0, 255);
- const uint8_t constant_border_value = int_dist(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, border_mode, format, constant_border_value, gradient_dimension);
- _reference = compute_reference(shape, border_mode, format, constant_border_value, gradient_dimension);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- template <typename V>
- void fill_zero(V &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0, static_cast<U>(0), static_cast<U>(0));
- }
-
- std::pair<TensorType, TensorType> compute_target(const TensorShape &shape, BorderMode border_mode, Format format, uint8_t constant_border_value, GradientDimension gradient_dimension)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type_from_format(format));
- TensorType dst_x = create_tensor<TensorType>(shape, data_type_from_format(Format::S16));
- TensorType dst_y = create_tensor<TensorType>(shape, data_type_from_format(Format::S16));
-
- src.info()->set_format(format);
- dst_x.info()->set_format(Format::S16);
- dst_y.info()->set_format(Format::S16);
-
- FunctionType derivative;
-
- switch(gradient_dimension)
- {
- case GradientDimension::GRAD_X:
- derivative.configure(&src, &dst_x, nullptr, border_mode, constant_border_value);
- break;
- case GradientDimension::GRAD_Y:
- derivative.configure(&src, nullptr, &dst_y, border_mode, constant_border_value);
- break;
- case GradientDimension::GRAD_XY:
- derivative.configure(&src, &dst_x, &dst_y, border_mode, constant_border_value);
- break;
- default:
- ARM_COMPUTE_ERROR("Gradient dimension not supported");
- }
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_y.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst_x.allocator()->allocate();
- dst_y.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_y.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
- fill_zero(AccessorType(dst_x));
- fill_zero(AccessorType(dst_y));
-
- // Compute function
- derivative.run();
-
- return std::make_pair(std::move(dst_x), std::move(dst_y));
- }
-
- std::pair<SimpleTensor<U>, SimpleTensor<U>> compute_reference(const TensorShape &shape, BorderMode border_mode, Format format, uint8_t constant_border_value, GradientDimension gradient_dimension)
- {
- // Create reference
- SimpleTensor<T> src{ shape, format };
-
- // Fill reference
- fill(src);
-
- return reference::derivative<U>(src, border_mode, constant_border_value, gradient_dimension);
- }
-
- BorderMode _border_mode{ BorderMode::UNDEFINED };
- std::pair<TensorType, TensorType> _target{};
- std::pair<SimpleTensor<U>, SimpleTensor<U>> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_DERIVATIVE_FIXTURE */
diff --git a/tests/validation/fixtures/DilateFixture.h b/tests/validation/fixtures/DilateFixture.h
deleted file mode 100644
index aa531a10fe..0000000000
--- a/tests/validation/fixtures/DilateFixture.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_DILATE_FIXTURE
-#define ARM_COMPUTE_TEST_DILATE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Dilate.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DilateValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType dilate;
- dilate.configure(&src, &dst, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- dilate.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::dilate<T>(src, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_DILATE_FIXTURE */
diff --git a/tests/validation/fixtures/DirectConvolution3DFixture.h b/tests/validation/fixtures/DirectConvolution3DFixture.h
new file mode 100644
index 0000000000..e80ad2f54f
--- /dev/null
+++ b/tests/validation/fixtures/DirectConvolution3DFixture.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTION3DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTION3DFIXTURE_H
+
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/Conv3D.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DirectConvolution3DValidationGenericFixture : public framework::Fixture
+{
+public:
+ using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;
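+ // TBias resolves to int32_t for quantized (uint8_t/int8_t) inputs, matching the S32
+ // bias data type selected in setup(); for float types it simply follows T.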
+
+ void setup(const TensorShape &input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth,
+ unsigned int num_kernels, bool has_bias, const ActivationLayerInfo &act_info, const DataType &data_type, const DataLayout &data_layout,
+ const QuantizationInfo &src_qinfo = QuantizationInfo(), const QuantizationInfo &weights_qinfo = QuantizationInfo(), const QuantizationInfo &dst_qinfo = QuantizationInfo())
+ {
+ ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NDHWC);
+
+ const TensorShape weights_shape(num_kernels, input_shape[0], kernel_width, kernel_height, kernel_depth);
+ const TensorShape bias_shape(num_kernels);
+ const DataType bias_data_type = is_data_type_quantized(data_type) ? DataType::S32 : data_type;
+ const Conv3dInfo conv3d_info(Size3D(stride_x, stride_y, stride_z), Padding3D(pad_x, pad_y, pad_z), act_info, Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false);
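+ // The fixture pins the remaining Conv3dInfo parameters: unit dilation
+ // (Size3D(1U, 1U, 1U)), FLOOR output rounding and (presumably) fast math disabled
+ // by the trailing 'false'; only stride, padding and activation vary per test.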
+ const TensorShape output_shape = compute_conv3d_shape(input_shape, weights_shape, conv3d_info);
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, conv3d_info, has_bias, data_type, bias_data_type, data_layout, src_qinfo, weights_qinfo, dst_qinfo);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, conv3d_info, has_bias, data_type, bias_data_type, src_qinfo, weights_qinfo, dst_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const Conv3dInfo &conv3d_info,
+ bool has_bias, const DataType &data_type, const DataType &bias_data_type, const DataLayout &data_layout, const QuantizationInfo &src_qinfo,
+ const QuantizationInfo &weights_qinfo, const QuantizationInfo &dst_qinfo)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, src_qinfo, data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, weights_qinfo, data_layout);
+ TensorType bias = has_bias ? create_tensor<TensorType>(bias_shape, bias_data_type, 1, QuantizationInfo()) : TensorType();
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, dst_qinfo, data_layout);
+
+ // Create and configure function
+ FunctionType conv{};
+ conv.configure(&src, &weights, has_bias ? &bias : nullptr, &dst, conv3d_info);
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src), 0);
+ fill(AccessorType(weights), 1);
+
+ if(has_bias)
+ {
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ bias.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ fill(AccessorType(bias), 2);
+ }
+
+ // Compute Direct Convolution 3D function
+ conv.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
+ const Conv3dInfo &conv3d_info, bool has_bias, const DataType &data_type, const DataType &bias_data_type, const QuantizationInfo &src_qinfo,
+ const QuantizationInfo &weights_qinfo, const QuantizationInfo &dst_qinfo)
+ {
+ // Create reference
+ SimpleTensor<T> src{ input_shape, data_type, 1, src_qinfo };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, weights_qinfo };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type };
+ SimpleTensor<T> dst{ output_shape, data_type, 1, dst_qinfo };
+
+ // Fill reference
+ fill(src, 0);
+ fill(weights, 1);
+
+ if(has_bias)
+ {
+ fill(bias, 2);
+ }
+
+ return reference::activation_layer(reference::conv3d<T, TBias>(src, weights, bias, dst, conv3d_info), conv3d_info.act_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DirectConvolution3DValidationFixture : public DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth,
+ unsigned int num_kernels, bool has_bias, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout)
+ {
+ DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, stride_z, pad_x, pad_y, pad_z, kernel_width, kernel_height,
+ kernel_depth, num_kernels, has_bias, act_info, data_type, data_layout);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DirectConvolution3DValidationQuantizedFixture : public DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth,
+ unsigned int num_kernels, bool has_bias, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout, QuantizationInfo src_qinfo, QuantizationInfo weights_qinfo,
+ QuantizationInfo dst_qinfo)
+ {
+ DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, stride_z, pad_x, pad_y, pad_z, kernel_width, kernel_height,
+ kernel_depth, num_kernels, has_bias, act_info, data_type, data_layout, src_qinfo,
+ weights_qinfo, dst_qinfo);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTION3DFIXTURE_H
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index c4e4180900..6f204642ca 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,6 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H
+
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -51,15 +55,54 @@ class DirectConvolutionValidationGenericFixture : public framework::Fixture
public:
using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;
-public:
- template <typename...>
+ void setup_quantization(const TensorShape &input_shape, const TensorShape &weights_shape, QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
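+ // For example, a draw of -5.0f gives a scale of 2^-5 = 0.03125 and a draw of 3.0f
+ // gives 2^3 = 8.0f, so the generated scales span a 256x range.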
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ input_q_info = QuantizationInfo(scale_lhs, offset_lhs);
+ weights_q_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
+ weights_shape.y() /* heights */, weights_shape.x() /* width */, input_shape.z() /* channels */,
+ data_type, 0.5f /* bias_fraction */);
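+ // Assumption: the returned QuantizationHint pairs a destination quantization able to
+ // represent the accumulator range with bias bounds that avoid saturating it; the
+ // 0.5f bias_fraction presumably caps the bias share of that range.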
+
+ _dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+
+ // Do not change these here: they are the natural limits of the associated data types
+ // and are embedded in the computation of the dst quantization info.
+ _min_u8 = 0;
+ _max_u8 = 255;
+ _min_s8 = -128;
+ _max_s8 = 127;
+ }
+
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
{
- ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
+ // This hash seeds the random generators. Hash collisions are possible, but
+ // intentional: it is an easy way to make the random values differ across many
+ // test configurations, which previously all drew the same set of values.
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
+ stride_x + stride_y + pad_x + pad_y + kernel_size + num_kernels + mixed_layout
+ + (data_layout == DataLayout::NHWC);
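+ // For example, a hypothetical 8x8x3x1 input with stride 1x1, pad 0x0, a 3x3 kernel,
+ // 4 kernels, no mixed layout and NCHW gives _hash = 8+8+3+1+1+1+0+0+3+4+0+0 = 29.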
- _quantization_info = quantization_info;
_data_type = data_type;
+ _mixed_layout = mixed_layout;
TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
@@ -71,27 +114,66 @@ public:
const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
+ QuantizationInfo input_q_info = quantization_info;
+ QuantizationInfo weights_q_info = quantization_info;
+ _dst_q_info = quantization_info;
+
+ if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type);
+ }
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info);
}
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
ARM_COMPUTE_UNUSED(dilation);
- _quantization_info = quantization_info;
+ // This hash seeds the random generators. Hash collisions are possible, but
+ // intentional: it is an easy way to make the random values differ across many
+ // test configurations, which previously all drew the same set of values.
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
+ weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] + dilation.x() +
+ dilation.y() + info.pad_bottom() + info.pad_left() + info.pad_right() + info.pad_top();
+
_data_type = data_type;
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
+ QuantizationInfo input_q_info = quantization_info;
+ QuantizationInfo weights_q_info = quantization_info;
+ _dst_q_info = quantization_info;
+
+ if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type);
+ }
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info);
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ DataLayout data_layout = src.info()->data_layout();
+ // Test multi data layout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstating original data layout for the test suite to properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -99,27 +181,32 @@ protected:
{
case DataType::QASYMM8:
{
- std::uniform_int_distribution<uint8_t> distribution(0, 50);
+ std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8);
library->fill(tensor, distribution, i);
break;
}
case DataType::QASYMM8_SIGNED:
{
// Use small input range to avoid all the test results being saturated at the end.
- std::uniform_int_distribution<int8_t> distribution(-25, 25);
+ std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8);
library->fill(tensor, distribution, i);
break;
}
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-5, 5);
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
library->fill(tensor, distribution, i);
break;
}
@@ -129,7 +216,7 @@ protected:
}
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
+ DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
{
if(data_layout == DataLayout::NHWC)
{
@@ -139,19 +226,22 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, input_q_info, data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, weights_q_info, data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, QuantizationInfo());
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, _dst_q_info, data_layout);
+
+ add_padding_x({ &src, &bias, &dst }, data_layout);
+ add_padding_x({ &weights }, data_layout, input_shape[0] % 4 == 0); // Don't add left padding if cl image will be used
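+ // Presumably a cl_image texel packs 4 elements, so the weights can only be exported
+ // to a cl_image when the innermost dimension is a multiple of 4; in that case left
+ // padding must be skipped, as the inline comment notes.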
// Create and configure function
FunctionType conv;
conv.configure(&src, &weights, &bias, &dst, info, act_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -159,67 +249,86 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(bias), 2);
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(bias), 2 + _hash);
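+ // compute_reference() fills its tensors with the same _hash-shifted seeds, so
+ // target and reference receive identical data while different configurations
+ // draw different random values.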
- // Compute NEConvolutionLayer function
- conv.run();
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
+ SimpleTensor<T> src{ input_shape, data_type, 1, input_q_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, weights_q_info };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, QuantizationInfo() };
// Fill reference
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
- SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
- return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
+ SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info,
+ Size2D(1U, 1U) /* dilation */, 1 /* num_groups */, _dst_q_info);
+ SimpleTensor<T> dst2 = (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
+ return dst2;
}
TensorType _target{};
SimpleTensor<T> _reference{};
- QuantizationInfo _quantization_info{};
+ QuantizationInfo _dst_q_info{};
DataType _data_type{};
+ bool _mixed_layout{ false };
+ int32_t _hash{0};
+
+ // Random initialization limits
+ // Default values are the previously handcrafted limits
+ // that should be used when we don't use dynamic quantization
+ int32_t _min_bias{-5};
+ int32_t _max_bias{5};
+ int32_t _min_u8{0};
+ int32_t _max_u8{50};
+ int32_t _min_s8{-25};
+ int32_t _max_s8{25};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
DataLayout data_layout)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
- act_info, data_layout);
+ act_info, data_layout, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
ActivationLayerInfo act_info, DataLayout data_layout)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
- act_info, data_layout);
+ act_info, data_layout, mixed_layout);
}
};
@@ -227,7 +336,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
{
@@ -240,7 +348,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
DataType data_type, ActivationLayerInfo act_info)
{
@@ -252,3 +359,5 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H
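The new mixed_layout template flag is threaded from the thin wrapper fixtures into the generic fixture's setup(). For readers unfamiliar with the pattern, this is a minimal sketch of the kind of helper such fixtures use to flip layouts between configure() and run(); the name mix_layout and the exact body are assumptions modelled on the sibling convolution fixtures, not code from this hunk:

    template <typename TensorType, typename FunctionType>
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        // Remember the layout the function was configured with.
        const DataLayout data_layout = src.info()->data_layout();

        // Switch to the opposite layout to emulate a graph whose data layout
        // changes after configure time.
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Run with the swapped layout.
        layer.run();

        // Restore the original layout so the validation step reads the values correctly.
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }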
diff --git a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
deleted file mode 100644
index bf6f3e2351..0000000000
--- a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
-#include "tests/validation/reference/ConvolutionLayer.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DirectConvolutionValidationGenericTensorShiftFixture : public framework::Fixture
-{
-public:
- using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
-
-public:
- template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, QuantizationInfo quantization_info)
- {
- _quantization_info = quantization_info;
- _data_type = data_type;
-
- const TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
- const TensorShape bias_shape(num_kernels);
- const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
- const TensorShape output_shape = get_output_shape(input_shape, weights_shape, info);
- const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
- }
-
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
- DataType data_type, QuantizationInfo quantization_info)
- {
- ARM_COMPUTE_UNUSED(dilation_x, dilation_y);
-
- _quantization_info = quantization_info;
- _data_type = data_type;
-
- const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- switch(tensor.data_type())
- {
- case DataType::QASYMM8:
- {
- std::uniform_int_distribution<uint8_t> distribution(0, 50);
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::F16:
- case DataType::F32:
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::S32:
- {
- std::uniform_int_distribution<int32_t> distribution(-5, 5);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- library->fill_tensor_uniform(tensor, i);
- }
- }
-
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
- TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);
-
- TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info);
- TensorType dst1 = create_tensor<TensorType>(output_shape1, data_type, 1, quantization_info);
-
- // Create and configure function
- FunctionType conv;
- conv.configure(&src, &weights, &bias, &dst, info);
- FunctionType conv1;
- conv1.configure(&dst, &weights, &bias, &dst1, info);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst1.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- bias.allocator()->allocate();
- dst.allocator()->allocate();
- dst1.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst1.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(bias), 2);
-
- // Compute NEConvolutionLayer function
- GCScheduler::get().memory_barrier();
- conv.run();
- GCScheduler::get().memory_barrier();
- conv1.run();
-
- return dst1;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
- {
- // Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
-
- SimpleTensor<T> dst{ output_shape, data_type, 1, quantization_info };
- TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info);
-
- // Fill reference
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
-
- dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
- return reference::convolution_layer<T>(dst, weights, bias, output_shape1, info);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- QuantizationInfo _quantization_info{};
- DataType _data_type{};
-
-private:
- TensorShape get_output_shape(TensorShape in_shape, TensorShape kernel_shape, const PadStrideInfo &info)
- {
- TensorShape out_shape(in_shape);
- const std::pair<unsigned int, unsigned int> scaled_dims = scaled_dimensions(in_shape.x(),
- in_shape.y(),
- kernel_shape.x(),
- kernel_shape.y(),
- info);
- out_shape.set(0, scaled_dims.first);
- out_shape.set(1, scaled_dims.second);
- out_shape.set(2, kernel_shape[3]);
- return out_shape;
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DirectConvolutionValidationTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
- {
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
- QuantizationInfo());
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DirectConvolutionValidationQuantizedTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
- {
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
- quantization_info);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DirectConvolutionValidationWithTensorShapesQuantizedTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
- DataType data_type, QuantizationInfo quantization_info)
- {
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
- quantization_info);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class DirectConvolutionValidationWithTensorShapesTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
- DataType data_type)
- {
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
- QuantizationInfo());
- }
-};
-
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h
index be25802650..a84f2a6407 100644
--- a/tests/validation/fixtures/DropoutLayerFixture.h
+++ b/tests/validation/fixtures/DropoutLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DropoutLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, float ratio, bool forward, DataType data_type)
{
_target = compute_target(shape, ratio, forward, data_type);
@@ -70,17 +69,17 @@ protected:
FunctionType dropout_layer;
dropout_layer.configure(&src, &mask, &dst, ratio, forward);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
mask.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!mask.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!mask.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/ElementWiseUnaryFixture.h b/tests/validation/fixtures/ElementWiseUnaryFixture.h
deleted file mode 100644
index 3f6d5b3cb3..0000000000
--- a/tests/validation/fixtures/ElementWiseUnaryFixture.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
-#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/ElementWiseUnary.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementWiseUnaryValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, DataType input_data_type, ElementWiseUnary op)
- {
- _op = op;
- _target = compute_target(input_shape, input_data_type);
- _reference = compute_reference(input_shape, input_data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i, DataType data_type)
- {
- switch(_op)
- {
- case ElementWiseUnary::EXP:
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::RSQRT:
- {
- std::uniform_real_distribution<> distribution(1.0f, 2.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::ABS:
- case ElementWiseUnary::NEG:
- {
- switch(data_type)
- {
- case DataType::F32:
- case DataType::F16:
- {
- std::uniform_real_distribution<> distribution(-2.0f, 2.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::S32:
- {
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented");
- }
- break;
- }
- case ElementWiseUnary::LOG:
- {
- std::uniform_real_distribution<> distribution(0.0000001f, 100.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::SIN:
- {
- std::uniform_real_distribution<> distribution(-100.00f, 100.00f);
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::ROUND:
- {
- std::uniform_real_distribution<> distribution(100.0f, -100.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType elwiseunary_layer;
-
- elwiseunary_layer.configure(&src, &dst);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0, data_type);
-
- // Compute function
- elwiseunary_layer.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src, 0, data_type);
-
- return reference::elementwise_unary<T>(src, _op);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- ElementWiseUnary _op{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class RsqrtValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::RSQRT);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ExpValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::EXP);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::NEG);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::LOG);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::ABS);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::SIN);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type)
- {
- ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::ROUND);
- }
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE */
diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h
index 44c096c521..f36a1f75b7 100644
--- a/tests/validation/fixtures/ElementwiseOperationsFixture.h
+++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,11 +21,12 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_OPERATIONS_FIXTURE
-#define ARM_COMPUTE_TEST_ELEMENTWISE_OPERATIONS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEOPERATIONSFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEOPERATIONSFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -45,12 +46,14 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticOperationsGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1,
DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace = false, bool use_dynamic_shape = false)
{
- _op = op;
+ _op = op;
+ _use_dynamic_shape = use_dynamic_shape;
+ _is_inplace = is_inplace;
+
_target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, qinfo0, qinfo1, qinfo_out);
_reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, qinfo0, qinfo1, qinfo_out);
}
@@ -59,16 +62,23 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- switch(_op)
+ if(is_data_type_float(tensor.data_type()))
+ {
+ switch(_op)
+ {
+ case ArithmeticOperation::DIV:
+ library->fill_tensor_uniform_ranged(tensor, i, { std::pair<float, float>(-0.001f, 0.001f) });
+ break;
+ case ArithmeticOperation::POWER:
+ library->fill_tensor_uniform(tensor, i, 0.0f, 5.0f);
+ break;
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+ else
{
- case ArithmeticOperation::DIV:
- library->fill_tensor_uniform_ranged(tensor, i, { std::pair<float, float>(-0.001f, 0.001f) });
- break;
- case ArithmeticOperation::POWER:
- library->fill_tensor_uniform(tensor, i, 0.0f, 5.0f);
- break;
- default:
- library->fill_tensor_uniform(tensor, i);
+ library->fill_tensor_uniform(tensor, i);
}
}
@@ -76,26 +86,67 @@ protected:
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
- TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
- TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out);
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, output_data_type, 1, qinfo_out);
+
+ // Check whether to compute in place and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if(_is_inplace)
+ {
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (data_type0 == output_data_type);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (data_type1 == output_data_type);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
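+ // A fixture configured for in-place must actually be able to reuse one of the inputs; otherwise the test parameters are wrong.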
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if(src1_is_inplace)
+ {
+ actual_dst = &ref_src1;
+ }
+ else
+ {
+ actual_dst = &ref_src2;
+ }
+ }
+
+ // If _use_dynamic_shape is true, this fixture tests the dynamic-shape scenario:
+ // - At configure time, all input tensors are marked as dynamic using set_tensor_dynamic()
+ // - After configure, the tensors are marked as static for the run using set_tensor_static()
+ // - The tensors, now with static shapes, are given to run()
+ if(_use_dynamic_shape)
+ {
+ set_tensor_dynamic(ref_src1);
+ set_tensor_dynamic(ref_src2);
+ }
// Create and configure function
FunctionType elem_op;
- elem_op.configure(&ref_src1, &ref_src2, &dst);
+ elem_op.configure(&ref_src1, &ref_src2, actual_dst);
- ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ if(_use_dynamic_shape)
+ {
+ set_tensor_static(ref_src1);
+ set_tensor_static(ref_src2);
+ }
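+ // Shapes must be static again before allocation; only configure() above exercises the dynamic-shape path.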
+
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // If not computing in place, the original dst still needs to be allocated
+ if(!_is_inplace)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -104,7 +155,7 @@ protected:
// Compute function
elem_op.run();
- return dst;
+ return std::move(*actual_dst);
}
SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1,
@@ -126,6 +177,8 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
ArithmeticOperation _op{ ArithmeticOperation::ADD };
+ bool _use_dynamic_shape{ false };
+ bool _is_inplace{ false };
};
// Arithmetic operation fused with activation function
@@ -133,15 +186,15 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticOperationsFuseActivationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1,
DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool is_inplace = true)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
- _act_info = act_info;
+ qinfo0, qinfo1, qinfo_out, is_inplace);
+ _act_info = act_info;
+ _is_inplace = is_inplace;
}
protected:
@@ -149,26 +202,51 @@ protected:
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
- TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
- TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out);
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, output_data_type, 1, qinfo_out);
+
+ // Check whether to compute in place and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if(_is_inplace)
+ {
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (data_type0 == output_data_type);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (data_type1 == output_data_type);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if(src1_is_inplace)
+ {
+ actual_dst = &ref_src1;
+ }
+ else
+ {
+ actual_dst = &ref_src2;
+ }
+ }
// Create and configure function
FunctionType elem_op;
- elem_op.configure(&ref_src1, &ref_src2, &dst, _act_info);
+ elem_op.configure(&ref_src1, &ref_src2, actual_dst, _act_info);
- ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // If not computing in place, the original dst still needs to be allocated
+ if(!_is_inplace)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -177,7 +255,7 @@ protected:
// Compute function
elem_op.run();
- return dst;
+ return std::move(*actual_dst);
}
SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1,
@@ -190,18 +268,18 @@ protected:
}
ActivationLayerInfo _act_info{};
+ bool _is_inplace{ false };
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ArithmeticDivisionBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -209,12 +287,35 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticDivisionValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticDivisionBroadcastDynamicShapeValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
+ {
+ ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace, true /* use_dynamic_shape */);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticDivisionDynamicShapeValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
+ {
+ ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace, true /* use_dynamic_shape */);
}
};
@@ -222,12 +323,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticDivisionBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -235,12 +335,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticDivisionValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticDivisionValidationIntegerFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -248,14 +359,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ArithmeticDivisionValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -263,12 +373,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMaxBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -276,12 +385,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMaxValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -289,12 +397,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMaxBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -302,12 +409,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMaxValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -315,14 +421,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMaxValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -330,14 +435,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMaxQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -345,12 +449,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMinBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -358,12 +461,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMinValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -371,12 +473,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMinBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -384,12 +485,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMinValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -397,14 +497,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMinValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -412,14 +511,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseMinQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -427,12 +525,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseSquaredDiffBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -440,12 +537,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseSquaredDiffValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -453,12 +549,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseSquaredDiffBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -466,12 +561,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseSquaredDiffValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -479,14 +573,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseSquaredDiffValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -494,14 +587,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwiseSquaredDiffQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1,
data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
+ qinfo0, qinfo1, qinfo_out, is_inplace);
}
};
@@ -509,7 +601,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PReluLayerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape0, shape1,
@@ -522,7 +613,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PReluLayerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape, shape,
@@ -535,7 +625,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PReluLayerValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
@@ -550,7 +639,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PReluLayerQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
@@ -565,12 +653,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwisePowerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -578,12 +665,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwisePowerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
{
ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace);
}
};
@@ -591,12 +677,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwisePowerBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -604,16 +689,15 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ElementwisePowerValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape,
data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ARITHMETIC_OPERATIONS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEOPERATIONSFIXTURE_H
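
For reference, the new is_inplace argument is driven from the operator test files by appending an in-place toggle to the existing dataset combination. A minimal sketch of such a registration, assuming the usual NEON test conventions (the fixture alias, dataset names and validation call are illustrative, not part of this patch):

    // Exercise both the out-of-place and the in-place execution paths
    const auto InPlaceDataSet = framework::dataset::make("InPlace", { false, true });

    template <typename T>
    using NEElementwisePowerFixture = ElementwisePowerValidationFixture<Tensor, Accessor, NEElementwisePower, T>;

    FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwisePowerFixture<float>, framework::DatasetMode::ALL,
                           combine(combine(combine(combine(datasets::SmallShapes(),
                                                           framework::dataset::make("DataType0", DataType::F32)),
                                                   framework::dataset::make("DataType1", DataType::F32)),
                                           framework::dataset::make("OutputDataType", DataType::F32)),
                                   InPlaceDataSet))
    {
        // _target and _reference are produced by the fixture's setup()
        validate(Accessor(_target), _reference);
    }
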
diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h
new file mode 100644
index 0000000000..15344288db
--- /dev/null
+++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+
+#include "arm_compute/core/QuantizationInfo.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ElementwiseUnary.h"
+
+#include <limits>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementWiseUnaryValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape, DataType input_data_type, bool in_place, ElementWiseUnary op,
+ bool use_dynamic_shape = false, QuantizationInfo qinfo = QuantizationInfo(), QuantizationInfo qinfo_out = QuantizationInfo())
+ {
+ _op = op;
+ // Assign before compute_target(), which reads _use_dynamic_shape.
+ _use_dynamic_shape = use_dynamic_shape;
+ _target = compute_target(input_shape, input_data_type, in_place, qinfo, qinfo_out);
+ _reference = compute_reference(input_shape, input_data_type, qinfo, qinfo_out);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, DataType data_type)
+ {
+ using FloatType = typename std::conditional < std::is_same<T, half>::value || std::is_floating_point<T>::value, T, float >::type;
+ using FloatDistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<FloatType>>::type;
+
+ switch(_op)
+ {
+ case ElementWiseUnary::EXP:
+ {
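+ // These bounds keep exp(x) within the finite range of the data type
+ // (ln(FLT_MIN) is roughly -87.3 and ln(FLT_MAX) roughly 88.7 for F32).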
+ switch(data_type)
+ {
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ FloatType(-86.63f), FloatType(88.36f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-9.00f), FloatType(10.73f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
+ break;
+ }
+ case ElementWiseUnary::RSQRT:
+ case ElementWiseUnary::LOG:
+ {
+ // For floating-point data type, the chosen input range is all positive numbers
+ // (i.e. positive and negative zeros are excluded).
+ switch(data_type)
+ {
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ std::numeric_limits<float>::min(), std::numeric_limits<float>::max() };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(0.00006103515625f), FloatType(65504.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
+ break;
+ }
+ case ElementWiseUnary::SIN:
+ {
+ switch(data_type)
+ {
+ case DataType::F32:
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-100.0f), FloatType(100.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
+ break;
+ }
+ case ElementWiseUnary::ABS:
+ case ElementWiseUnary::NEG:
+ case ElementWiseUnary::ROUND:
+ {
+ switch(data_type)
+ {
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ std::numeric_limits<float>::lowest() / 2, std::numeric_limits<float>::max() / 2 };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-65504.0f), FloatType(65504.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+ }
+
+ TensorType compute_target(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo qinfo, QuantizationInfo qinfo_out)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, qinfo);
+ TensorType dst = create_tensor<TensorType>(shape, data_type, 1, qinfo_out);
+ TensorType *actual_dst = in_place ? &src : &dst;
+
+ // If _use_dynamic_shape is true, this fixture tests the dynamic-shape scenario:
+ // - At configure time, all input tensors are marked as dynamic using set_tensor_dynamic()
+ // - After configure, tensors are marked as static for run using set_tensor_static()
+ // - The tensors with static shape are given to run()
+ if(_use_dynamic_shape)
+ {
+ set_tensor_dynamic(src);
+ }
+
+ // Create and configure function
+ FunctionType elwiseunary_layer;
+ elwiseunary_layer.configure(&src, actual_dst);
+
+ if(_use_dynamic_shape)
+ {
+ set_tensor_static(src);
+ }
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ src.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ if(!in_place)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ // Fill tensors
+ fill(AccessorType(src), 0, data_type);
+
+ // Compute function
+ elwiseunary_layer.run();
+
+ if(in_place)
+ {
+ return src;
+ }
+ else
+ {
+ return dst;
+ }
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo, QuantizationInfo qinfo_out)
+ {
+ // Create reference
+ SimpleTensor<T> src{ shape, data_type, 1, qinfo };
+ SimpleTensor<T> dst{ shape, data_type, 1, qinfo_out };
+
+ // Fill reference
+ fill(src, 0, data_type);
+
+ return reference::elementwise_unary<T>(src, dst, _op);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ ElementWiseUnary _op{};
+ bool _use_dynamic_shape{ false };
+ QuantizationInfo _input_qinfo{};
+ QuantizationInfo _output_qinfo{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RsqrtQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo, QuantizationInfo qinfo_out)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT, false, qinfo, qinfo_out);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RsqrtValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RsqrtDynamicShapeValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT, true);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ExpValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ExpQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, bool in_place)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AbsQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SinQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RoundQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND, false, iq, oq);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE */
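
As with the binary fixtures above, these convenience fixtures are what the operator test files instantiate. A minimal sketch, assuming the NEON backend and its NERsqrtLayer operator (the alias and tolerance are illustrative):

    template <typename T>
    using NERsqrtLayerFixture = RsqrtValidationFixture<Tensor, Accessor, NERsqrtLayer, T>;

    FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerFixture<float>, framework::DatasetMode::ALL,
                           combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
    {
        // Compare the fixture's target output against its computed reference
        validate(Accessor(_target), _reference, RelativeTolerance<float>(0.000001f));
    }
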
diff --git a/tests/validation/fixtures/EqualizeHistogramFixture.h b/tests/validation/fixtures/EqualizeHistogramFixture.h
deleted file mode 100644
index 0d91e6d50b..0000000000
--- a/tests/validation/fixtures/EqualizeHistogramFixture.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_EQUALIZE_HISTOGRAM_FIXTURE
-#define ARM_COMPUTE_TEST_EQUALIZE_HISTOGRAM_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/EqualizeHistogram.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class EqualizeHistogramValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type)
- {
- _target = compute_target(shape, data_type);
- _reference = compute_reference(shape, data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType equalize_histogram;
-
- equalize_histogram.configure(&src, &dst);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- equalize_histogram.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- return reference::equalize_histogram<T>(src);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_EQUALIZE_HISTOGRAM_FIXTURE */
diff --git a/tests/validation/fixtures/ErodeFixture.h b/tests/validation/fixtures/ErodeFixture.h
deleted file mode 100644
index d37cac0298..0000000000
--- a/tests/validation/fixtures/ErodeFixture.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_ERODE_FIXTURE
-#define ARM_COMPUTE_TEST_ERODE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Erode.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ErodeValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType erode;
- erode.configure(&src, &dst, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- erode.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::erode<T>(src, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ERODE_FIXTURE */
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 1aaa5965b2..024227b22a 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class FFTValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -59,8 +58,23 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- std::uniform_real_distribution<float> distribution(-5.f, 5.f);
- library->fill(tensor, distribution, 0);
+ switch(tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -5.0f, 5.0f };
+ library->fill(tensor, distribution, 0);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-5.0f, 5.0f);
+ library->fill(tensor, distribution, 0);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, 0);
+ }
}
TensorType compute_target(const TensorShape &shape, DataType data_type)
@@ -73,15 +87,17 @@ protected:
FunctionType fft;
fft.configure(&src, &dst, InfoType());
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &src, &dst });
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -117,26 +133,46 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class FFTConvolutionValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
{
- _data_type = data_type;
- _data_layout = data_layout;
+ _mixed_layout = mixed_layout;
+ _data_type = data_type;
+ _data_layout = data_layout;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
_reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ // Test multi data layout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
switch(tensor.data_type())
{
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -164,14 +200,16 @@ protected:
TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout);
TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);
+
// Create and configure function
FunctionType conv;
- conv.configure(&src, &weights, &bias, &dst, info, act_info);
+ conv.configure(&src, &weights, &bias, &dst, info, act_info, _data_type == DataType::F16);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -179,19 +217,25 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
- // Compute convolution function
- conv.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -218,18 +262,18 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataLayout _data_layout{};
+ bool _mixed_layout{ false };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
{
FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
- data_type, data_layout, act_info);
+ data_type, data_layout, act_info, mixed_layout);
}
};
} // namespace validation
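
The new trailing mixed_layout template parameter is typically bound through aliases so the same fixture can be registered once per variant. A sketch assuming the CL backend (the alias names are illustrative):

    template <typename T>
    using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T>;

    // Same fixture, but switches the data layout after configure() via mix_layout()
    template <typename T>
    using CLFFTConvolutionLayerMixedDataLayoutFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T, true>;
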
diff --git a/tests/validation/fixtures/FastCornersFixture.h b/tests/validation/fixtures/FastCornersFixture.h
deleted file mode 100644
index 6f2add6210..0000000000
--- a/tests/validation/fixtures/FastCornersFixture.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_FAST_CORNERS_FIXTURE
-#define ARM_COMPUTE_TEST_FAST_CORNERS_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/FastCorners.h"
-
-#include <random>
-
-namespace arm_compute
-{
-class CLFastCorners;
-class NEFastCorners;
-
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename ArrayType, typename FunctionType, typename T>
-class FastCornersValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(std::string image, Format format, bool suppress_nonmax, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> int_dist(0, 255);
- std::uniform_real_distribution<float> real_dist(0, 255);
-
- const uint8_t constant_border_value = int_dist(gen);
- const float threshold = real_dist(gen);
-
- _target = compute_target(image, format, threshold, suppress_nonmax, border_mode, constant_border_value);
- _reference = compute_reference(image, format, threshold, suppress_nonmax, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, RawTensor raw)
- {
- library->fill(tensor, raw);
- }
-
- template <typename F, typename std::enable_if<std::is_same<F, CLFastCorners>::value, int>::type = 0>
- void configure_target(F &func, TensorType &src, ArrayType &corners, unsigned int *num_corners, float threshold, bool suppress_nonmax, BorderMode border_mode, uint8_t constant_border_value)
- {
- func.configure(&src, threshold, suppress_nonmax, &corners, num_corners, border_mode, constant_border_value);
- }
-
- template <typename F, typename std::enable_if<std::is_same<F, NEFastCorners>::value, int>::type = 0>
- void configure_target(F &func, TensorType &src, ArrayType &corners, unsigned int *num_corners, float threshold, bool suppress_nonmax, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_UNUSED(num_corners);
- func.configure(&src, threshold, suppress_nonmax, &corners, border_mode, constant_border_value);
- }
-
- ArrayType compute_target(const std::string &image, Format format, float threshold, bool suppress_nonmax, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Load the image (cached by the library if loaded before)
- const RawTensor &raw = library->get(image, format);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(raw.shape(), format);
-
- // Create array of keypoints
- ArrayType corners(raw.shape().total_size());
- unsigned int num_corners = raw.shape().total_size();
-
- // Create and configure function
- FunctionType fast_corners;
- configure_target<FunctionType>(fast_corners, src, corners, &num_corners, threshold, suppress_nonmax, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), raw);
-
- // Compute function
- fast_corners.run();
-
- return corners;
- }
-
- std::vector<KeyPoint> compute_reference(const std::string &image, Format format, float threshold, bool suppress_nonmax, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Load the image (cached by the library if loaded before)
- const RawTensor &raw = library->get(image, format);
-
- // Create reference
- SimpleTensor<T> src{ raw.shape(), format };
-
- // Fill reference
- fill(src, raw);
-
- // Compute reference
- return reference::fast_corners<T>(src, threshold, suppress_nonmax, border_mode, constant_border_value);
- }
-
- ArrayType _target{};
- std::vector<KeyPoint> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FAST_CORNERS_FIXTURE */
diff --git a/tests/validation/fixtures/FillFixture.h b/tests/validation/fixtures/FillFixture.h
index c9817a3e28..0239a68903 100644
--- a/tests/validation/fixtures/FillFixture.h
+++ b/tests/validation/fixtures/FillFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class FillFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, DataType data_type)
{
_target = compute_target(input_shape, data_type);
diff --git a/tests/validation/fixtures/FlattenLayerFixture.h b/tests/validation/fixtures/FlattenLayerFixture.h
index d17080695b..e72487c7cf 100644
--- a/tests/validation/fixtures/FlattenLayerFixture.h
+++ b/tests/validation/fixtures/FlattenLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,7 +50,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class FlattenLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
TensorShape shape_flatten;
@@ -66,7 +65,10 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, 0);
}
@@ -80,15 +82,15 @@ protected:
FunctionType flatten_layer;
flatten_layer.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
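
The std::conditional selection used in fill() above is a recurring idiom in these fixtures: half gets the framework's 16-bit-safe distribution, other floating-point types get std::uniform_real_distribution directly. A standalone, simplified illustration of the same idiom (substituting an integral vs. floating-point split for the half vs. float one, so it compiles without the test framework):

    #include <iostream>
    #include <random>
    #include <type_traits>

    // Select a distribution type from the element type, as the fixtures do.
    template <typename T>
    using DistributionType = typename std::conditional<std::is_integral<T>::value,
                                                       std::uniform_int_distribution<T>,
                                                       std::uniform_real_distribution<T>>::type;

    template <typename T>
    T sample(unsigned int seed)
    {
        std::mt19937        gen(seed);
        DistributionType<T> distribution(T(-1), T(1));
        return distribution(gen);
    }

    int main()
    {
        std::cout << sample<int>(0) << ' ' << sample<float>(0) << '\n';
        return 0;
    }
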
diff --git a/tests/validation/fixtures/FloorFixture.h b/tests/validation/fixtures/FloorFixture.h
index 246105e365..7d38666f47 100644
--- a/tests/validation/fixtures/FloorFixture.h
+++ b/tests/validation/fixtures/FloorFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class FloorValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -68,15 +67,15 @@ protected:
FunctionType floor_func;
floor_func.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 6952b226da..344187868f 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_FULLYCONNECTEDLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_FULLYCONNECTEDLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -34,6 +34,7 @@
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
+#include "tests/validation/Validation.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/FullyConnectedLayer.h"
#include "tests/validation/reference/Utils.h"
@@ -54,16 +55,63 @@ public:
using TBias = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type;
public:
- template <typename...>
+ void setup_quantization(TensorShape weights_shape, TensorShape output_shape, QuantizationInfo &input_q_info, QuantizationInfo &weights_q_info, DataType data_type)
+ {
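+ // Derive a per-configuration offset from the tensor shapes; it is added to the
+ // RNG seed and to the fill seeds so different test cases get different data.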
+ _hash = weights_shape[0] + weights_shape[1] + output_shape[0] + output_shape[1];
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ input_q_info = QuantizationInfo(scale_lhs, offset_lhs);
+ weights_q_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ const int k = weights_shape.x();
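+ // From the input/weights quantization and the accumulation depth k, derive a
+ // destination quantization and a bias range that keep the MAC output well distributed.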
+ QuantizationHint q_hint = suggest_mac_dst_q_info_and_bias(input_q_info, weights_q_info, k, data_type, 0.1f /* bias_fraction */, 4 /* number of standard deviations */);
+
+ _dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+
+ // Do not change here as these limits are the natural limits of the associated data types and
+ // are embedded in the computation of the dst quantization info.
+ _min_u8 = 0;
+ _max_u8 = 255;
+ _min_s8 = -128;
+ _max_s8 = 127;
+ }
+
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
- DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false)
{
ARM_COMPUTE_UNUSED(weights_shape);
ARM_COMPUTE_UNUSED(bias_shape);
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _quantization_info = quantization_info;
+
+ // Note: the QuantizationInfo parameter of setup() is only used for quantized data types when the activation function is enabled and is not the identity; otherwise a suitable quantization is generated below.
+ if(is_data_type_quantized(data_type) && (!activation_info.enabled() || activation_info.activation() == ActivationFunction::IDENTITY))
+ {
+ // Initialise the quantization info with appropriate scales and offsets for the given shapes.
+ setup_quantization(weights_shape, output_shape, _input_q_info, _weight_q_info, data_type);
+ }
+ else
+ {
+ _input_q_info = quantization_info;
+ _weight_q_info = quantization_info;
+ _dst_q_info = quantization_info;
+ }
+
_activation_info = activation_info;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
@@ -71,27 +119,47 @@ public:
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi data layout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
if(_data_type == DataType::QASYMM8)
{
- std::uniform_int_distribution<uint8_t> distribution(0, 30);
+ std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8);
library->fill(tensor, distribution, i);
}
else if(_data_type == DataType::QASYMM8_SIGNED)
{
- std::uniform_int_distribution<int8_t> distribution(-15, 15);
+ std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8);
library->fill(tensor, distribution, i);
}
else if(_data_type == DataType::S32)
{
- std::uniform_int_distribution<int32_t> distribution(-50, 50);
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
library->fill(tensor, distribution, i);
}
- else if(is_data_type_float(_data_type))
+ else if(_data_type == DataType::F16)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+ else if(_data_type == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
}
else
@@ -123,10 +191,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _input_q_info);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _weight_q_info);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _dst_q_info);
// Create Fully Connected layer info
FullyConnectedLayerInfo fc_info;
@@ -138,10 +206,12 @@ protected:
FunctionType fc;
fc.configure(&src, &weights, &bias, &dst, fc_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &src, &weights, &bias, &dst });
// Allocate tensors
src.allocator()->allocate();
@@ -149,14 +219,14 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(bias), 2);
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(bias), 2 + _hash);
if(!reshape_weights || !transpose_weights)
{
@@ -164,7 +234,7 @@ protected:
RawTensor tmp(tmp_shape, _data_type, 1);
// Fill with original shape
- fill(tmp, 1);
+ fill(tmp, 1 + _hash);
// Transpose elementwise
tmp = transpose(tmp);
@@ -181,11 +251,18 @@ protected:
}
else
{
- fill(AccessorType(weights), 1);
+ fill(AccessorType(weights), 1 + _hash);
}
- // Compute NEFullyConnectedLayer function
- fc.run();
+ if(_mixed_layout)
+ {
+ mix_layout(fc, src, dst);
+ }
+ else
+ {
+ // Compute NEFullyConnectedLayer function
+ fc.run();
+ }
return dst;
}
@@ -193,54 +270,384 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape)
{
// Create reference
- SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info };
- SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
+ SimpleTensor<T> src{ input_shape, _data_type, 1, _input_q_info };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1, _weight_q_info };
+ SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, QuantizationInfo() };
// Fill reference
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
- return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape), _activation_info, _quantization_info);
+ return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape, _dst_q_info), _activation_info, _dst_q_info);
}
TensorType _target{};
SimpleTensor<T> _reference{};
DataType _data_type{};
DataType _bias_data_type{};
- QuantizationInfo _quantization_info{};
+ bool _mixed_layout{ false };
+ QuantizationInfo _input_q_info{};
+ QuantizationInfo _weight_q_info{};
+ QuantizationInfo _dst_q_info{};
ActivationLayerInfo _activation_info{};
+
+ // Random initialization limits
+ // Default values are the previously handcrafted limits
+ // that should be used when dynamic quantization is not used
+ int32_t _min_bias{-50};
+ int32_t _max_bias{50};
+
+ int32_t _min_u8{0};
+ int32_t _max_u8{30};
+ int32_t _min_s8{-15};
+ int32_t _max_s8{15};
+ int _hash{0};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type,
ActivationLayerInfo activation_info)
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- QuantizationInfo(), activation_info);
+ QuantizationInfo(), activation_info, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type,
QuantizationInfo quantization_info, ActivationLayerInfo activation_info)
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- quantization_info, activation_info);
+ quantization_info, activation_info, mixed_layout);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FullyConnectedWithDynamicTensorsFixture : public framework::Fixture
+{
+private:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ if(_data_type == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+ else if(_data_type == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+ else if(_data_type == DataType::QASYMM8)
+ {
+ std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8);
+ library->fill(tensor, distribution, i);
+ }
+ else if(_data_type == DataType::QASYMM8_SIGNED)
+ {
+ std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8);
+ library->fill(tensor, distribution, i);
+ }
+ else if(_data_type == DataType::S32)
+ {
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ void fill_transposed_weights(TensorType &weights, TensorShape weights_shape, int seed)
+ {
+ RawTensor tmp(weights_shape, _data_type, 1);
+
+ // Fill with original shape
+ fill(tmp, seed);
+
+ // Transpose elementwise
+ tmp = transpose(tmp);
+
+ AccessorType weights_accessor(weights);
+
+ for(int i = 0; i < tmp.num_elements(); ++i)
+ {
+ Coordinates coord = index2coord(tmp.shape(), i);
+ std::copy_n(static_cast<const RawTensor::value_type *>(tmp(coord)),
+ tmp.element_size(),
+ static_cast<RawTensor::value_type *>(weights_accessor(coord)));
+ }
+ }
+
+ void validate_with_tolerance(TensorType &target, SimpleTensor<float> &ref)
+ {
+ constexpr RelativeTolerance<float> rel_tolerance_f32(0.01f);
+ constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.001f);
+ validate(AccessorType(target), ref, rel_tolerance_f32, 0, abs_tolerance_f32);
+ }
+
+ void validate_with_tolerance(TensorType &target, SimpleTensor<half_float::half> &ref)
+ {
+ constexpr AbsoluteTolerance<float> abs_tolerance_f16(0.3f);
+ const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f));
+ constexpr float tolerance_num_f16 = 0.07f;
+
+ validate(AccessorType(target), ref, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
+ }
+
+ void validate_with_tolerance(TensorType &target, SimpleTensor<uint8_t> &ref)
+ {
+ constexpr AbsoluteTolerance<uint32_t> tolerance_qasymm8(1);
+ validate(AccessorType(target), ref, tolerance_qasymm8);
+ }
+
+ void validate_with_tolerance(TensorType &target, SimpleTensor<int8_t> &ref)
+ {
+ constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8_signed(1);
+ validate(AccessorType(target), ref, tolerance_qasymm8_signed);
+ }
+
+ void setup_quantization(TensorShape weights_shape, TensorShape output_shape, QuantizationInfo &input_q_info, QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ _hash = weights_shape[0] + weights_shape[1] + output_shape[0] + output_shape[1];
+
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ input_q_info = QuantizationInfo(scale_lhs, offset_lhs);
+ weights_q_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ const int k = weights_shape.x();
+ QuantizationHint q_hint = suggest_mac_dst_q_info_and_bias(input_q_info, weights_q_info, k, data_type, 0.1f /* bias_fraction */, 4 /* number of standard deviations */);
+
+ _dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+
+ // Do not change these here: they are the natural limits of the associated data types and
+ // are embedded in the computation of the dst quantization info.
+ _min_u8 = 0;
+ _max_u8 = 255;
+ _min_s8 = -128;
+ _max_s8 = 127;
+ }
+
+public:
+ using TDecay = typename std::decay<T>::type;
+ using TBias = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type;
+
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
+ DataType data_type, ActivationLayerInfo activation_info, bool constant_weights, bool constant_bias, bool weights_reshaped, bool remove_bias = false)
+ {
+ _data_type = data_type;
+
+ const bool is_quantized = is_data_type_quantized(data_type);
+ const DataType bias_data_type = (is_quantized) ? DataType::S32 : data_type;
+
+ if (is_quantized && (!activation_info.enabled() || activation_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(weights_shape, dst_shape, _src_q_info, _weights_q_info, data_type);
+ }
+ else
+ {
+ _src_q_info = QuantizationInfo(0.1f, 10);
+ _dst_q_info = QuantizationInfo(0.3f, 20);
+ _weights_q_info = QuantizationInfo(0.2f, 5);
+ }
+
+ // Configure TensorInfo Objects
+ const TensorInfo src_info(src_shape, 1, data_type, _src_q_info);
+ const TensorInfo dst_info(dst_shape, 1, data_type, _dst_q_info);
+ TensorInfo bias_info(bias_shape, 1, bias_data_type);
+ TensorInfo wei_info(weights_shape, 1, data_type, _weights_q_info);
+
+ if(!constant_weights && weights_reshaped)
+ {
+ const TensorShape tr_weights_shape{ weights_shape[1], weights_shape[0] };
+ wei_info.set_tensor_shape(tr_weights_shape);
+ }
+ wei_info.set_are_values_constant(constant_weights);
+ bias_info.set_are_values_constant(constant_bias);
+
+ // Initialise Tensors
+ _src.allocator()->init(src_info);
+ _weights.allocator()->init(wei_info);
+ if(!remove_bias)
+ _bias.allocator()->init(bias_info);
+ _dst.allocator()->init(dst_info);
+
+ // Configure FC layer and mark the weights as non constant
+ FullyConnectedLayerInfo fc_info;
+ fc_info.activation_info = activation_info;
+ if(!constant_weights)
+ {
+ fc_info.are_weights_reshaped = weights_reshaped;
+ fc_info.transpose_weights = !weights_reshaped;
+ }
+ FunctionType fc;
+ fc.configure(&_src, &_weights, (remove_bias) ? nullptr : &_bias, &_dst, fc_info);
+
+ // Allocate all the tensors
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ if(!remove_bias)
+ _bias.allocator()->allocate();
+ _dst.allocator()->allocate();
+
+ // Run multiple iterations with different inputs
+ constexpr int num_iterations = 5;
+ int randomizer_offset = 0;
+
+ // Create reference tensors
+ SimpleTensor<T> src{ src_shape, data_type, 1, _src_q_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, _weights_q_info };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type };
+
+ // Fill weights and/or bias if they remain constant
+ if(constant_weights)
+ {
+ fill(AccessorType(_weights), 1 + _hash);
+ fill(weights, 1 + _hash);
+ }
+ if(constant_bias && !remove_bias)
+ {
+ fill(AccessorType(_bias), 2 + _hash);
+ fill(bias, 2 + _hash);
+ }
+ // When the bias is removed, zero-fill the reference bias tensor, since the reference implementation still consumes it
+ if(remove_bias && is_quantized)
+ {
+ library->fill_tensor_value(bias, 0);
+ }
+ else if(remove_bias)
+ {
+ library->fill_tensor_value(bias, (float)0.0);
+ }
+
+ for(int i = 0; i < num_iterations; ++i)
+ {
+ // Run target
+ {
+ fill(AccessorType(_src), randomizer_offset);
+ if(!constant_weights)
+ {
+ if(weights_reshaped)
+ {
+ fill_transposed_weights(_weights, weights_shape, randomizer_offset + 1 + _hash);
+ }
+ else
+ {
+ fill(AccessorType(_weights), randomizer_offset + 1 + _hash);
+ }
+ }
+ if(!constant_bias && !remove_bias)
+ {
+ fill(AccessorType(_bias), randomizer_offset + 2 + _hash);
+ }
+
+ fc.run();
+ }
+
+ // Run reference and compare
+ {
+ // Fill reference
+ fill(src, randomizer_offset);
+ if(!constant_weights)
+ {
+ fill(weights, randomizer_offset + 1 + _hash);
+ }
+ if(!constant_bias && !remove_bias)
+ {
+ fill(bias, randomizer_offset + 2 + _hash);
+ }
+
+ auto dst = reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, dst_shape, _dst_q_info), activation_info, _dst_q_info);
+
+ // Validate
+ validate_with_tolerance(_dst, dst);
+ }
+
+ randomizer_offset += 100;
+ }
+ }
+
+private:
+ TensorType _src{}, _weights{}, _bias{}, _dst{};
+ DataType _data_type{ DataType::UNKNOWN };
+
+ QuantizationInfo _src_q_info{};
+ QuantizationInfo _weights_q_info{};
+ QuantizationInfo _dst_q_info{};
+
+ // Random initialization limits
+ // Default values are the previously handcrafted limits
+ // that should be used when we don't use dynamic quantization
+ int32_t _min_bias{-50};
+ int32_t _max_bias{50};
+
+ int32_t _min_u8{0};
+ int32_t _max_u8{30};
+ int32_t _min_s8{-15};
+ int32_t _max_s8{15};
+ int _hash{0};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FullyConnectedWithDynamicWeightsFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
+ DataType data_type, ActivationLayerInfo activation_info, bool weights_reshaped)
+ {
+ FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
+ dst_shape, data_type, activation_info, false, true, weights_reshaped, false);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FullyConnectedDynamicNoBiasFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
+ DataType data_type, ActivationLayerInfo activation_info, bool weights_reshaped)
+ {
+ FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
+ dst_shape, data_type, activation_info, false, true, weights_reshaped, true);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FullyConnectedWithDynamicBiasFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
+ DataType data_type, ActivationLayerInfo activation_info)
+ {
+ FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
+ dst_shape, data_type, activation_info, true, false, false, false);
}
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_FULLYCONNECTEDLAYERFIXTURE_H
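Note on the dynamic quantization introduced above: setup_quantization() draws per-test scales as powers of two in [2^-5, 2^3] and offsets across the full range of the quantized type, then suggest_mac_dst_q_info_and_bias() derives the destination quantization info and the bias limits. A minimal standalone sketch of the random-parameter step (QParamsSketch and make_random_qparams are hypothetical names used for illustration, not part of this patch):

    #include <cmath>
    #include <cstdint>
    #include <random>

    // Hypothetical helper mirroring setup_quantization() above: draw a random
    // scale as a power of two in [2^-5, 2^3] and an offset spanning the full
    // range of the quantized type, deterministically from a shape-derived seed.
    struct QParamsSketch
    {
        float   scale;
        int32_t offset;
    };

    inline QParamsSketch make_random_qparams(uint32_t seed, int32_t type_min, int32_t type_max)
    {
        std::mt19937                           gen(seed);
        std::uniform_real_distribution<float>  exponent(-5.0f, 3.0f);
        std::uniform_int_distribution<int32_t> offset(type_min, type_max);
        return { std::pow(2.0f, exponent(gen)), offset(gen) };
    }

Seeding with library->seed() + _hash, as the fixture does, keeps each configuration deterministic while still varying the parameters across tensor shapes.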
diff --git a/tests/validation/fixtures/FuseBatchNormalizationFixture.h b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
index 780b4a0fb3..a05e4169a7 100644
--- a/tests/validation/fixtures/FuseBatchNormalizationFixture.h
+++ b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, int
class FuseBatchNormalizationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta)
{
std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta);
@@ -96,14 +95,14 @@ protected:
FunctionType fuse_batch_normalization;
fuse_batch_normalization.configure(&w, &mean, &var, w_fused_to_use, b_fused_to_use, b_to_use, beta_to_use, gamma_to_use, _epsilon, fuse_bn_type);
- ARM_COMPUTE_EXPECT(w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(w_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(w_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(gamma.info()->is_resizable());
// Allocate tensors
w.allocator()->allocate();
@@ -115,14 +114,14 @@ protected:
beta.allocator()->allocate();
gamma.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!w_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!w_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!gamma.info()->is_resizable());
// Fill tensors
fill(AccessorType(w), 0U, -1.0f, 1.0f);
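For context, this fixture validates folding a batch-normalization stage into the preceding layer's weights and bias. A sketch of the standard per-channel folding math, assuming contiguous per-channel weights (fuse_bn is an illustrative helper, not the library API):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Standard batch-norm folding (sketch): w_fused = w * gamma / sqrt(var + eps),
    // b_fused = (b - mean) * gamma / sqrt(var + eps) + beta, per output channel.
    void fuse_bn(std::vector<float> &w, std::vector<float> &b,
                 const std::vector<float> &mean, const std::vector<float> &var,
                 const std::vector<float> &beta, const std::vector<float> &gamma,
                 float epsilon, std::size_t weights_per_channel)
    {
        for(std::size_t c = 0; c < b.size(); ++c)
        {
            const float inv_std = 1.0f / std::sqrt(var[c] + epsilon);
            for(std::size_t i = 0; i < weights_per_channel; ++i)
            {
                w[c * weights_per_channel + i] *= gamma[c] * inv_std;
            }
            b[c] = (b[c] - mean[c]) * gamma[c] * inv_std + beta[c];
        }
    }

The with_bias/with_gamma/with_beta flags exercised by the fixture correspond to treating the missing vectors as b = 0, gamma = 1 and beta = 0 respectively.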
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index efe7567075..94bedc83e1 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_GEMM_FIXTURE
-#define ARM_COMPUTE_TEST_GEMM_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_GEMMFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_GEMMFIXTURE_H
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorShape.h"
@@ -34,6 +34,7 @@
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ElementwiseOperations.h"
#include "tests/validation/reference/GEMM.h"
#include <random>
@@ -44,16 +45,15 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
-class GEMMValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool pretranspose_a = false, bool pretranspose_b = false, bool run_twice = false>
+class GEMMGenericValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type, bool accumulate=false)
{
ARM_COMPUTE_UNUSED(pretranspose);
- _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
- _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type);
+ _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, accumulate);
+ _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type, accumulate);
}
protected:
@@ -63,9 +63,14 @@ protected:
switch(tensor.data_type())
{
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(lo), float(hi) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(lo, hi);
+ std::uniform_real_distribution<float> distribution(lo, hi);
library->fill(tensor, distribution, i);
break;
}
@@ -75,7 +80,7 @@ protected:
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
- DataType data_type)
+ DataType data_type, bool accumulate=false)
{
// Create tensors
TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
@@ -93,12 +98,14 @@ protected:
(disable_c) ? nullptr : &c,
&dst,
alpha, beta,
- GEMMInfo(false, false, false, (reinterpret_output_as_3d ? output_shape[2] : 0), reinterpret_input_as_3d, false, GEMMLowpOutputStageInfo(), false, (reinterpret_input_as_3d
- || reinterpret_output_as_3d)));
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ GEMMInfo(false, false, false, (reinterpret_output_as_3d ? output_shape[2] : 0), reinterpret_input_as_3d, false, GEMMLowpOutputStageInfo(), false, false, (reinterpret_input_as_3d
+ || reinterpret_output_as_3d), arm_compute::ActivationLayerInfo(), false /* fixed_format */, arm_compute::WeightFormat::UNSPECIFIED, false /* pretranspose_B */, accumulate));
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &a, &b, &c, &dst });
// Allocate tensors
a.allocator()->allocate();
@@ -106,18 +113,33 @@ protected:
c.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(a), 0);
fill(AccessorType(b), 1);
+ if (accumulate)
+ {
+ fill(AccessorType(dst), 6);
+ }
if(!disable_c)
{
fill(AccessorType(c), 2);
}
+ // Run with variable inputs.
+ if(run_twice)
+ {
+ gemm.run();
+ fill(AccessorType(a), 3); // Fill tensors with new seed after run
+ fill(AccessorType(b), 4);
+ if(!disable_c)
+ {
+ fill(AccessorType(c), 5);
+ }
+ }
// Compute GEMM function
gemm.run();
@@ -126,10 +148,9 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, float alpha, float beta,
- DataType data_type)
+ DataType data_type, bool accumulate=false)
{
TensorShape shape_a_to_use = shape_a;
-
if(reinterpret_input_as_3d)
{
// Collapse the second and third dimension if the input is 3D
@@ -140,6 +161,7 @@ protected:
SimpleTensor<T> a{ shape_a_to_use, data_type, 1 };
SimpleTensor<T> b{ shape_b, data_type, 1 };
SimpleTensor<T> c{ output_shape, data_type, 1 };
+ SimpleTensor<T> dst{ output_shape, data_type, 1 };
// Fill reference
fill(a, 0);
@@ -152,27 +174,96 @@ protected:
const int m = reinterpret_output_as_3d ? output_shape[1] * output_shape[2] : output_shape[1];
const int batch_size = reinterpret_output_as_3d ? output_shape[3] : output_shape[2];
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(c.data() + i * n, c.data(), n * sizeof(T));
}
}
+ /* Note: Assuming the usual batch matmul dimensions A = (B x M x K) and B = (B x K x N), if pretranspose_a is set to true then A is assumed to be (B x K x M),
+ so A must be pre-transposed before being passed to the fixture. The fixture then transposes A once more to restore (B x M x K),
+ which is the layout the reference implementation expects.
+ Similarly, if pretranspose_b is set to true, then B is assumed to be (B x N x K) and must be pre-transposed before being passed to the fixture. */
+
+ // Define transposed shapes
+ TensorShape a_transposed_shape(a.shape().y(), a.shape().x());
+ TensorShape b_transposed_shape(b.shape().y(), b.shape().x());
+
+ // Define transposed tensors
+ SimpleTensor<T> a_transposed{ a_transposed_shape, data_type };
+ SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };
+
+ // Pretranspose A if necessary
+ if(pretranspose_a)
+ {
+ transpose_matrix<T>(a, a_transposed);
+ }
+
+ // Pretranspose B if necessary
+ if(pretranspose_b)
+ {
+ transpose_matrix<T>(b, b_transposed);
+ }
+
+ // Run with variable inputs.
+ if(run_twice)
+ {
+ reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, alpha, disable_c ? 0.f : beta);
+ fill((pretranspose_a) ? a_transposed : a, 3);
+ fill((pretranspose_b) ? b_transposed : b, 4);
+ fill(c, 5);
+ }
+
+ // Pre-fill dst for the in-place accumulation
+ if (accumulate)
+ {
+ fill(dst, 6);
+ }
+
// Setting beta to 0 will effectively disable C for the
// computation of the reference: alpha * A * B + 0 * C
- return reference::gemm<T>(a, b, c, alpha, disable_c ? 0.f : beta);
+ // Use the transposed tensors when pretransposition is enabled, otherwise use the original tensors
+ if (accumulate)
+ {
+ reference::gemm_accumulate<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, alpha, disable_c ? 0.f : beta, dst);
+ return dst;
+ }
+ else
+ {
+ return reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, alpha, disable_c ? 0.f : beta);
+ }
}
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool pretranspose_a = false, bool pretranspose_b = false, bool run_twice = false>
+class GEMMValidationFixture : protected GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type)
+ {
+ GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, pretranspose, data_type, false /*accumulate*/);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool pretranspose_a = false, bool pretranspose_b = false, bool run_twice = false>
+class GEMMAccumulateValidationFixture : protected GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type)
+ {
+ bool accumulate = true;
+ GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, pretranspose, data_type, accumulate);
+ }
+};
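The accumulate path exercised by GEMMAccumulateValidationFixture differs from the plain GEMM validation only in that the destination is pre-filled and summed into, i.e. dst += alpha * (A x B) + beta * C. A naive row-major sketch of those semantics (gemm_accumulate_ref is illustrative, not the library's reference implementation):

    // Computes dst += alpha * (A x B) + beta * C for row-major M x K, K x N and
    // M x N matrices; mirrors what reference::gemm_accumulate validates above.
    void gemm_accumulate_ref(const float *a, const float *b, const float *c,
                             float *dst, int m, int n, int k, float alpha, float beta)
    {
        for(int row = 0; row < m; ++row)
        {
            for(int col = 0; col < n; ++col)
            {
                float acc = 0.f;
                for(int i = 0; i < k; ++i)
                {
                    acc += a[row * k + i] * b[i * n + col];
                }
                dst[row * n + col] += alpha * acc + beta * c[row * n + col];
            }
        }
    }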
+
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiplyValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info,
DataType data_type, GPUTarget gpu_arch)
{
@@ -191,11 +282,14 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
// Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
- std::uniform_real_distribution<> distribution_inf(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
library->fill_borders_with_garbage(tensor, distribution_inf, i);
}
@@ -216,12 +310,14 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(gpu_arch, &lhs, &rhs, &bias, &dst, alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
+ GEMMOperatorType gemm;
+ gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ add_padding_x({ &lhs, &rhs, &bias, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -229,10 +325,10 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -240,7 +336,11 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
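The recurring change in this file replaces the function-style run() with the stateless operator interface: configure() now takes ITensorInfo* only, and concrete tensors are bound per call through an ITensorPack keyed by ACL_SRC_0/1/2 and ACL_DST. A hedged sketch of the payoff (lhs_a, rhs_a and friends are hypothetical tensors):

    // Because no tensors are captured at configure() time, one configured
    // operator can be reused against different tensor sets:
    //   ITensorPack pack_a{ { ACL_SRC_0, &lhs_a }, { ACL_SRC_1, &rhs_a }, { ACL_SRC_2, &bias_a }, { ACL_DST, &dst_a } };
    //   ITensorPack pack_b{ { ACL_SRC_0, &lhs_b }, { ACL_SRC_1, &rhs_b }, { ACL_SRC_2, &bias_b }, { ACL_DST, &dst_b } };
    //   gemm.run(pack_a);
    //   gemm.run(pack_b); // no reconfiguration needed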
@@ -268,7 +368,7 @@ protected:
if(broadcast_bias)
{
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -282,11 +382,10 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiply3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, bool broadcast_bias, bool fp16_mixed_precision,
const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch)
{
@@ -308,7 +407,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
@@ -329,12 +431,14 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(gpu_arch, &lhs, &rhs, &bias, &dst, alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
+ GEMMOperatorType gemm;
+ gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &lhs, &rhs, &bias, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -342,10 +446,10 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -353,7 +457,11 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -381,7 +489,7 @@ protected:
fill(rhs, 1);
fill(bias, 2);
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -394,11 +502,10 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyInterleavedTransposedValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, unsigned int v0, unsigned int h0, bool broadcast_bias, bool fp16_mixed_precision,
const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch)
{
@@ -431,11 +538,14 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
// Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
- std::uniform_real_distribution<> distribution_inf(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
library->fill_borders_with_garbage(tensor, distribution_inf, i);
}
@@ -458,16 +568,22 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(gpu_arch, &lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(gpu_arch, lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // We do not pad when using a CL image as it needs to comply with strict pitch alignment restrictions
+ if(!rhs_info.export_to_cl_image)
+ {
+ add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+ }
// Allocate tensors
lhs.allocator()->allocate();
@@ -477,12 +593,12 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -490,9 +606,15 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -520,7 +642,7 @@ protected:
if(broadcast_bias)
{
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -534,11 +656,10 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyInterleavedTransposed3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, unsigned int v0, unsigned int h0, bool broadcast_bias,
bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch)
{
@@ -574,7 +695,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
@@ -597,16 +721,22 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(gpu_arch, &lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
-
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(gpu_arch, lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // We do not pad when using a CL image as it needs to comply with strict pitch alignment restrictions
+ if(!rhs_info.export_to_cl_image)
+ {
+ add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+ }
// Allocate tensors
lhs.allocator()->allocate();
@@ -616,12 +746,12 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -629,9 +759,15 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -659,7 +795,7 @@ protected:
fill(rhs, 1);
fill(bias, 2);
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -672,13 +808,12 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType, bool fp_mixed_precision = false>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType, bool fp_mixed_precision = false>
class GEMMMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
- bool interleave_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias, bool lhs_transpose, const ActivationLayerInfo &act_info)
+ bool interleave_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, bool broadcast_bias, bool lhs_transpose, const ActivationLayerInfo &act_info)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
@@ -688,11 +823,12 @@ public:
lhs_info.transpose = lhs_transpose;
GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0;
- rhs_info.k0 = k0;
- rhs_info.h0 = h0;
- rhs_info.interleave = interleave_rhs;
- rhs_info.transpose = !lhs_transpose;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.h0 = h0;
+ rhs_info.interleave = interleave_rhs;
+ rhs_info.transpose = !lhs_transpose;
+ rhs_info.export_to_cl_image = export_to_cl_image;
// Set the tensor shapes for LHS and RHS matrices
const TensorShape lhs_shape(k, m, batch_size);
@@ -701,19 +837,25 @@ public:
broadcast_bias ? 1 : m,
broadcast_bias ? 1 : batch_size);
- _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
+ if(validate_result)
+ {
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ }
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
// Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
- std::uniform_real_distribution<> distribution_inf(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
library->fill_borders_with_garbage(tensor, distribution_inf, i);
}
@@ -744,16 +886,30 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+
+ validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
+ validate_result = validate_result || !rhs_info.export_to_cl_image;
+ if(!validate_result)
+ {
+ return nullptr;
+ }
+
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // We do not pad when using a CL image as it needs to comply with strict pitch alignment restrictions
+ if(!rhs_info.export_to_cl_image)
+ {
+ add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+ }
// Allocate tensors
lhs.allocator()->allocate();
@@ -763,12 +919,12 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -776,9 +932,15 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -806,7 +968,7 @@ protected:
if(broadcast_bias)
{
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -823,18 +985,17 @@ protected:
}
}
+ bool validate_result = true;
TensorType _target{};
SimpleTensor<T> _reference{};
};
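The validate_result guard added to the reshaped fixtures turns unsupported export_to_cl_image configurations into skips rather than failures. The two assignments compose a single condition; an equivalent one-liner, shown for clarity (sketch):

    // Skip only when CL-image export is requested AND validation rejects it:
    //   const bool skip = rhs_info.export_to_cl_image
    //                     && !bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
    //   if(skip) { return nullptr; } // compute_reference() is then bypassed via validate_result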
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType, bool fp_mixed_precision = false>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType, bool fp_mixed_precision = false>
class GEMMMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
- bool interleave_lhs,
- bool interleave_rhs, DataType data_type, float alpha, float beta, bool lhs_transpose, const ActivationLayerInfo &act_info)
+ bool interleave_lhs, bool interleave_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, bool lhs_transpose, const ActivationLayerInfo &act_info)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
@@ -844,11 +1005,12 @@ public:
lhs_info.transpose = lhs_transpose;
GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0;
- rhs_info.k0 = k0;
- rhs_info.h0 = h0;
- rhs_info.interleave = interleave_rhs;
- rhs_info.transpose = !lhs_transpose;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.h0 = h0;
+ rhs_info.interleave = interleave_rhs;
+ rhs_info.transpose = !lhs_transpose;
+ rhs_info.export_to_cl_image = export_to_cl_image;
// In case of GEMM3D, m is the product between m_w and m_h
const unsigned int m = m_w * m_h;
@@ -858,15 +1020,21 @@ public:
const TensorShape rhs_shape(n, k, batch_size);
const TensorShape bias_shape(n, 1, 1);
- _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
+ if(validate_result)
+ {
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
+ }
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
@@ -897,16 +1065,30 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+
+ validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
+ validate_result = validate_result || !rhs_info.export_to_cl_image;
+ if(!validate_result)
+ {
+ return nullptr;
+ }
+
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // We do not pad when using a CL image as it needs to comply with strict pitch alignment restrictions
+ if(!rhs_info.export_to_cl_image)
+ {
+ add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+ }
// Allocate tensors
lhs.allocator()->allocate();
@@ -916,12 +1098,12 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -929,9 +1111,15 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -959,7 +1147,7 @@ protected:
fill(rhs, 1);
fill(bias, 2);
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -975,28 +1163,29 @@ protected:
}
}
+ bool validate_result = true;
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
- bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
+ bool interleave_rhs, bool transpose_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
lhs_info.k0 = k0;
GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0;
- rhs_info.k0 = k0;
- rhs_info.h0 = h0;
- rhs_info.interleave = interleave_rhs;
- rhs_info.transpose = transpose_rhs;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.h0 = h0;
+ rhs_info.interleave = interleave_rhs;
+ rhs_info.transpose = transpose_rhs;
+ rhs_info.export_to_cl_image = export_to_cl_image;
// Set the tensor shapes for LHS and RHS matrices
const TensorShape lhs_shape(k, m, batch_size);
@@ -1005,19 +1194,25 @@ public:
broadcast_bias ? 1 : m,
broadcast_bias ? 1 : batch_size);
- _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
+ if(validate_result)
+ {
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ }
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
// Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
- std::uniform_real_distribution<> distribution_inf(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
library->fill_borders_with_garbage(tensor, distribution_inf, i);
}
@@ -1046,14 +1241,28 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+
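+ // Skip the run only when validation fails with cl_image export requested
+ // (i.e. the target does not support cl_image); any other validation
+ // failure will surface in configure() below.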
+ validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
+ validate_result = validate_result || !rhs_info.export_to_cl_image;
+ if(!validate_result)
+ {
+ return nullptr;
+ }
+
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // We do not pad when exporting to a CL image, as it needs to comply with strict pitch alignment restrictions
+ if(!rhs_info.export_to_cl_image)
+ {
+ add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst });
+ }
// Allocate tensors
lhs.allocator()->allocate();
@@ -1062,11 +1271,11 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -1074,8 +1283,13 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1103,7 +1317,7 @@ protected:
if(broadcast_bias)
{
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -1113,28 +1327,29 @@ protected:
return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
}
+ bool validate_result = true;
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
- bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info)
+ bool interleave_rhs, bool transpose_rhs, bool export_to_cl_image, bool has_pad_y, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
lhs_info.k0 = k0;
GEMMRHSMatrixInfo rhs_info;
- rhs_info.n0 = n0;
- rhs_info.k0 = k0;
- rhs_info.h0 = h0;
- rhs_info.interleave = interleave_rhs;
- rhs_info.transpose = transpose_rhs;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.h0 = h0;
+ rhs_info.interleave = interleave_rhs;
+ rhs_info.transpose = transpose_rhs;
+ rhs_info.export_to_cl_image = export_to_cl_image;
// In case of GEMM3D, m is the product between m_w and m_h
const unsigned int m = m_w * m_h;
@@ -1144,21 +1359,27 @@ public:
const TensorShape rhs_shape(n, k, batch_size);
const TensorShape bias_shape(n, 1, 1);
- _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info);
- _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info, has_pad_y);
+ if(validate_result)
+ {
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info);
+ }
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
DataType data_type, float alpha, float beta,
- unsigned int m_h, const ActivationLayerInfo &act_info)
+ unsigned int m_h, const ActivationLayerInfo &act_info, bool has_pad_y)
{
// Create tensors
TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
@@ -1178,18 +1399,39 @@ protected:
kernel_info.reinterpret_input_as_3d = false;
kernel_info.broadcast_bias = true;
kernel_info.activation_info = act_info;
+ kernel_info.has_pad_y = has_pad_y;
// The output tensor will be auto-initialized within the function
-
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+
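+ // As above: skip only when cl_image export is requested but unsupported.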
+ validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
+ validate_result = validate_result || !rhs_info.export_to_cl_image;
+ if(!validate_result)
+ {
+ return nullptr;
+ }
+
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ if(has_pad_y)
+ {
+ // Add dummy padding into lhs to validate has_pad_y path
+ lhs.info()->extend_padding(PaddingSize(2, 0, 2, 0));
+ dst.info()->extend_padding(PaddingSize(2, 0, 1, 0));
+ }
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // We do not pad when exporting to a CL image, as it needs to comply with strict pitch alignment restrictions
+ if(!rhs_info.export_to_cl_image)
+ {
+ add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst });
+ }
// Allocate tensors
lhs.allocator()->allocate();
@@ -1198,11 +1440,11 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -1210,8 +1452,13 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1239,7 +1486,7 @@ protected:
fill(rhs, 1);
fill(bias, 2);
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -1248,15 +1495,15 @@ protected:
return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
}
+ bool validate_result = true;
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiplyNativeValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta, bool broadcast_bias,
const ActivationLayerInfo &act_info)
{
@@ -1283,11 +1530,14 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
// Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
- std::uniform_real_distribution<> distribution_inf(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
library->fill_borders_with_garbage(tensor, distribution_inf, i);
}
@@ -1313,12 +1563,14 @@ protected:
kernel_info.activation_info = act_info;
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ GEMMOperatorType gemm;
+ gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &lhs, &rhs, &bias, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -1326,10 +1578,10 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -1337,7 +1589,11 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1365,7 +1621,7 @@ protected:
if(broadcast_bias)
{
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -1379,11 +1635,10 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiplyNative3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta,
const ActivationLayerInfo &act_info)
{
@@ -1411,7 +1666,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
@@ -1439,12 +1697,14 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ GEMMOperatorType gemm;
+ gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ add_padding_x({ &lhs, &rhs, &bias, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -1452,10 +1712,10 @@ protected:
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
@@ -1463,7 +1723,11 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1491,7 +1755,7 @@ protected:
fill(rhs, 1);
fill(bias, 2);
- // In case of broadcast, we need simply copy the first into the following "M" ones
+ // In case of broadcast, we simply copy the first row into the following "M" ones
for(int i = 1; i < m * batch_size; i++)
{
memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
@@ -1504,7 +1768,170 @@ protected:
SimpleTensor<T> _reference{};
};
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
+class GEMMMatrixMultiplyReshapedOnlyRhsMMULValidationFixture : public framework::Fixture
+{
+public:
+ void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, bool export_to_cl_image, DataType data_type, float alpha,
+ float beta, bool broadcast_bias,
+ const ActivationLayerInfo &act_info)
+ {
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0;
+ lhs_info.k0 = k0;
+
+ GEMMRHSMatrixInfo rhs_info;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.interleave = true;
+ rhs_info.transpose = false;
+ rhs_info.h0 = 4;
+ rhs_info.export_to_cl_image = export_to_cl_image;
+
+ // Set the tensor shapes for LHS and RHS matrices
+ const TensorShape lhs_shape(k, m, batch_size);
+ const TensorShape rhs_shape(n, k, batch_size);
+ const TensorShape bias_shape(n,
+ broadcast_bias ? 1 : m,
+ broadcast_bias ? 1 : batch_size);
+
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
+ library->fill(tensor, distribution, i);
+
+ // Fill border with infinity in order to check the presence of NaN values (i.e. inf * 0)
+ DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) };
+ library->fill_borders_with_garbage(tensor, distribution_inf, i);
+ }
+
+ TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
+ DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info)
+ {
+ // Create tensors
+ TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
+ TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
+ TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
+ TensorType rhs_reshaped;
+ TensorType dst;
+
+ const unsigned int M = lhs_shape[1];
+ const unsigned int N = rhs_shape[0];
+ const unsigned int K = lhs_shape[0];
+ GEMMKernelInfo kernel_info;
+ kernel_info.m = M;
+ kernel_info.n = N;
+ kernel_info.k = K;
+ kernel_info.depth_output_gemm3d = 0;
+ kernel_info.reinterpret_input_as_3d = false;
+ kernel_info.broadcast_bias = broadcast_bias;
+ kernel_info.activation_info = act_info;
+
+ // Create and configure function
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+
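+ // Skip both target and reference when the configuration is not supported
+ // by the target (e.g. when the required MMUL extension is unavailable).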
+ validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
+ if(!validate_result)
+ {
+ return nullptr;
+ }
+
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+
+ validate_result = bool(gemm.validate(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info));
+ if(!validate_result)
+ {
+ return nullptr;
+ }
+
+ gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // Allocate tensors
+ lhs.allocator()->allocate();
+ rhs.allocator()->allocate();
+ rhs_reshaped.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(lhs), 0);
+ fill(AccessorType(rhs), 1);
+ fill(AccessorType(bias), 2);
+
+ // Compute GEMM
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias,
+ const ActivationLayerInfo &act_info)
+ {
+ if(!validate_result)
+ return SimpleTensor<T>();
+
+ TensorShape dst_shape = lhs_shape;
+ dst_shape[0] = rhs_shape[0];
+ dst_shape[1] = lhs_shape[1];
+
+ // Create reference
+ SimpleTensor<T> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<T> rhs{ rhs_shape, data_type, 1 };
+ SimpleTensor<T> bias{ dst_shape, data_type, 1 };
+
+ const int n = rhs_shape[0];
+ const int m = lhs_shape[1];
+ const int batch_size = lhs_shape[2];
+
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
+ fill(bias, 2);
+
+ if(broadcast_bias)
+ {
+ // In case of broadcast, we simply copy the first row into the following "M" ones
+ for(int i = 1; i < m * batch_size; i++)
+ {
+ memcpy(bias.data() + i * n, bias.data(), n * sizeof(T));
+ }
+ }
+
+ return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info);
+ }
+
+ bool validate_result = true;
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMM_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_GEMMFIXTURE_H
diff --git a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
index 26ffd3636a..59fc460869 100644
--- a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
+++ b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class GEMMInterleave4x4ValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(size_t x, size_t y, DataType data_type)
{
_data_type = data_type;
@@ -63,9 +62,14 @@ protected:
switch(tensor.data_type())
{
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -83,24 +87,25 @@ protected:
// Create and configure function
FunctionType f;
- f.configure(&a, &b);
+ f.configure(a.info(), b.info());
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensors
fill(AccessorType(a), 0);
fill(AccessorType(b), 0);
- // Compute GEMM function
- f.run();
+ // Compute GEMM interleave kernel
+ ITensorPack tensors{ { ACL_SRC, &a }, { ACL_DST, &b } };
+ f.run(tensors);
return b;
}
diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
deleted file mode 100644
index 03949063e0..0000000000
--- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE
-#define ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/GEMMLowp.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T2>
-class GEMMLowpAssemblyFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(size_t m, size_t n, size_t k)
- {
- const TensorShape shape_a(k, m);
- const TensorShape shape_b(n, k);
- const TensorShape shape_c(n, m);
- _target = compute_target(shape_a, shape_b, shape_c);
- _reference = compute_reference(shape_a, shape_b, shape_c);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i, int lo, int hi)
- {
- std::uniform_int_distribution<> distribution(lo, hi);
- library->fill(tensor, distribution, i);
- }
-
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
- {
- DataType dt_in = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8;
-
- // Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, dt_in, 1);
- TensorType b = create_tensor<TensorType>(shape_b, dt_in, 1);
- TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
-
- // Create and configure function
- FunctionType gemmlowp;
- gemmlowp.configure(&a, &b, nullptr, &c);
-
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- a.allocator()->allocate();
- b.allocator()->allocate();
- c.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- if(dt_in == DataType::S8)
- {
- fill(AccessorType(a), 0, -128, 127);
- fill(AccessorType(b), 1, -128, 127);
- }
- else
- {
- fill(AccessorType(a), 0, 0, 255);
- fill(AccessorType(b), 1, 0, 255);
- }
- fill(AccessorType(c), 2, 0, 0);
-
- // Compute GEMM function
- gemmlowp.run();
- return c;
- }
-
- SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
- {
- DataType dt = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8;
-
- // Create reference
- SimpleTensor<T2> a{ shape_a, dt, 1 };
- SimpleTensor<T2> b{ shape_b, dt, 1 };
-
- // Fill reference
- if(dt == DataType::S8)
- {
- fill(a, 0, -128, 127);
- fill(b, 1, -128, 127);
- }
- else
- {
- fill(a, 0, 0, 255);
- fill(b, 1, 0, 255);
- }
-
- return reference::gemmlowp<int32_t, T2>(a, b, shape_c);
- }
-
- TensorType _target{};
- SimpleTensor<int32_t> _reference{};
-};
-
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index e3dc7381fc..aa4eedb75d 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,22 +21,20 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
-#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H
-#include "arm_compute/core/KernelDescriptors.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
+#include "src/core/utils/quantization/AsymmHelpers.h"
#include "tests/validation/Helpers.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Validation.h"
#include "tests/validation/reference/GEMMLowp.h"
+#include "tests/validation/reference/ArithmeticOperations.h"
+#include "tests/validation/reference/DequantizationLayer.h"
-#include <random>
+#include <cstdint>
+#include <vector>
namespace arm_compute
{
@@ -49,115 +47,145 @@ namespace
template <typename U>
void fill(U &&tensor, int i)
{
- switch(tensor.data_type())
- {
- case DataType::QSYMM8_PER_CHANNEL:
- {
- int min_bound = 128;
- int max_bound = -127;
- for(size_t j = 0; j < tensor.quantization_info().scale().size(); j++)
- {
- std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
- if(bounds.first < min_bound)
- {
- min_bound = bounds.first;
- }
- if(bounds.second > max_bound)
- {
- max_bound = bounds.second;
- }
- }
- std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::QASYMM8:
- {
- std::uniform_int_distribution<uint8_t> distribution(1, 254);
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::F16:
- case DataType::F32:
- {
- // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- library->fill_tensor_uniform(tensor, i);
- }
+ library->fill_tensor_uniform(tensor, i);
}
-template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
-TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
- GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
- QuantizationInfo b_qinfo = QuantizationInfo())
+template <typename U>
+void fill_quantized(U &&tensor, int i)
{
- // Create tensors
- DataType data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;
-
- TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1);
- TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1); // gemm output before output stage mismatch if i pass data_layout_output here. to be investigated
- TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1);
-
- a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
+ ARM_COMPUTE_ASSERT(is_data_type_quantized(tensor.data_type()));
+ library->fill_tensor_uniform(tensor, i);
+}
- if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
+template <typename U>
+void fill(U &&tensor, int i, int32_t min, int32_t max)
+{
+ if (tensor.data_type() == DataType::S32)
+ {
+ std::uniform_int_distribution<int32_t> distribution(min, max);
+ library->fill(tensor, distribution, i);
+ }
+ else if(tensor.data_type() == DataType::F32)
{
- b.info()->set_quantization_info(b_qinfo);
+ std::uniform_real_distribution<float> distribution((float)min, (float)max);
+ library->fill(tensor, distribution, i);
}
else
{
- b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
+
+/** Information about how to fill tensors */
+struct TensorFillInfo
+{
+ // Bias fill range. Default values are arbitrary
+ int32_t min_bias {-20000};
+ int32_t max_bias {20000};
+
+ // Output fill range. Default values are arbitrary
+ int32_t min_output {-20000};
+ int32_t max_output {20000};
+
+ // Optional extra hash to randomize tensor filling
+ int32_t hash {0};
+};
+
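+/** Create, configure and run a GEMMLowp function on quantized inputs and return its output.
+ *
+ * Optionally fuses a bias (is_fused), accumulates into a pre-filled output
+ * (accumulate) or runs twice with refilled inputs (run_twice).
+ */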
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false, bool run_twice = false>
+TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
+ const QuantizationInfo& output_qinfo, DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
+ GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo(),
+ bool accumulate = false, bool dynamic_qinfo = false, DataType data_type_output = DataType::UNKNOWN)
+{
+ ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a));
+ ARM_COMPUTE_ASSERT(data_type_a == data_type_b);
+ // If unknown, set to sensible defaults
+ if (data_type_output == DataType::UNKNOWN)
+ {
+ data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;
}
+
+ // Create tensors
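+ // With dynamic quantization, the tensors are created with a placeholder
+ // QuantizationInfo (flagged dynamic); the real values are set after configure().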
+ TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1, dynamic_qinfo ? QuantizationInfo(1.0,0,true) : a_qinfo);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1, dynamic_qinfo ? QuantizationInfo(1.0,0,true) : b_qinfo); // Note: the GEMM output before the output stage mismatches if data_layout_output is passed here; to be investigated
+ TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1, output_qinfo /* output_qinfo will be ignored when output stage type is None */);
+
TensorType bias;
if(is_fused)
{
TensorShape bias_shape(shape_b[0]);
- bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
+ bias = create_tensor<TensorType>(bias_shape, data_type_output == DataType::F32 ? DataType::F32 : DataType::S32, 1);
}
// Create and configure function
// The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
FunctionType gemmlowp;
- // TODO (COMPMID-1672) - Extending the test to validate add bias in offset contribution
- gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage));
+ gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, reshape_b_only_on_first_run, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false,
+ output_stage, false /*fp_mixed_precision*/, false /*fast_math*/, false /*broadcast_bias*/,
+ arm_compute::ActivationLayerInfo(), false /* fixed_format */, arm_compute::WeightFormat::UNSPECIFIED,
+ false /* pretranspose_B */, accumulate));
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // If the QuantizationInfo is dynamic, it needs to be settable after configure (note that we also force it to be dynamic)
+ if (dynamic_qinfo)
+ {
+ a.info()->set_quantization_info(QuantizationInfo(a_qinfo.scale(), a_qinfo.offset(), true));
+ b.info()->set_quantization_info(QuantizationInfo(b_qinfo.scale(), b_qinfo.offset(), true));
+ }
+
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
+
+ add_padding_x({ &a, &b, &output });
// Allocate tensors
a.allocator()->allocate();
b.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
- fill(AccessorType(a), 0);
- fill(AccessorType(b), 1);
+ fill_quantized(AccessorType(a), 0 + finfo.hash);
+ fill_quantized(AccessorType(b), 1 + finfo.hash);
+
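+ // The accumulate path pre-fills the output, which the GEMM then adds to;
+ // this cannot be combined with run_twice, as the assert below enforces.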
+ if (accumulate)
+ {
+ ARM_COMPUTE_ASSERT(accumulate != run_twice);
+ fill(AccessorType(output), 6 + finfo.hash, finfo.min_output, finfo.max_output);
+ }
if(is_fused)
{
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
bias.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- fill(AccessorType(bias), 2);
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ fill(AccessorType(bias), 2 + finfo.hash, finfo.min_bias, finfo.max_bias);
}
+
+ // Run with variable inputs.
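+ // First run, then refill the inputs with new seeds so the second run
+ // (below) validates the function with updated tensor contents.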
+ if(run_twice)
+ {
+ gemmlowp.run();
+ fill_quantized(AccessorType(a), 3 + finfo.hash); // Fill tensors with new seed after run
+ fill_quantized(AccessorType(b), 4 + finfo.hash);
+ if(is_fused)
+ {
+ fill(AccessorType(bias), 5 + finfo.hash, finfo.min_bias, finfo.max_bias);
+ }
+ }
+
// Compute GEMM function
gemmlowp.run();
return output;
}
-template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t>
-SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
- DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, QuantizationInfo b_qinfo = QuantizationInfo())
+template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t, bool pretranspose_A = false, bool pretranspose_B = false, bool run_twice = false>
+SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
+ DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, const TensorFillInfo& finfo = TensorFillInfo())
{
+ ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a));
+ ARM_COMPUTE_ASSERT(data_type_a == data_type_b);
TensorShape shape_a_to_use = shape_a;
if(reinterpret_input_as_3d)
{
@@ -166,101 +194,269 @@ SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, con
}
// Create reference
- SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1 };
- SimpleTensor<TW> b{ shape_b, data_type_b, 1, data_type_b == DataType::QSYMM8_PER_CHANNEL ? b_qinfo : QuantizationInfo(1.0f / 255, b_offset) };
+ SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1, a_qinfo };
+ SimpleTensor<TW> b{ shape_b, data_type_b, 1, b_qinfo };
+
+ TensorShape shape_a_to_use_transposed{ shape_a_to_use };
+ TensorShape shape_b_transposed{ shape_b };
+
+ shape_a_to_use_transposed.set(0, shape_a_to_use[1]);
+ shape_a_to_use_transposed.set(1, shape_a_to_use[0]);
+ shape_b_transposed.set(0, shape_b[1]);
+ shape_b_transposed.set(1, shape_b[0]);
+
+ SimpleTensor<TI> a_transposed{ shape_a_to_use_transposed, data_type_a, 1, a_qinfo };
+ SimpleTensor<TW> b_transposed{ shape_b_transposed, data_type_b, 1, b_qinfo };
// Fill reference
- fill(a, 0);
- fill(b, 1);
- return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>(a, b, shape_output, a_offset, b_offset);
-}
+ fill_quantized(a, 0 + finfo.hash);
+ fill_quantized(b, 1 + finfo.hash);
+
+ // Transpose reference if required
+ /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_A is set to true, then A is assumed to be (B x K x M),
+ therefore A must be pre-transposed before being passed to the fixture. We then transpose A again in the fixture to make it (B x M x K),
+ in order to be able to call the reference implementation, which works with (B x M x K) inputs.
+ Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K) and must be pre-transposed before being passed to the fixture. */
+ if(pretranspose_A)
+ {
+ transpose_matrix<TI>(a, a_transposed);
+ }
+
+ if(pretranspose_B)
+ {
+ transpose_matrix<TW>(b, b_transposed);
+ }
+
+ const int32_t a_offset = a_qinfo.uniform().offset;
+ const int32_t b_offset = b_qinfo.uniform().offset;
+
+ // Run with variable inputs.
+ if(run_twice)
+ {
+ reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>((pretranspose_A ? a_transposed : a), (pretranspose_B ? b_transposed : b), shape_output, a_offset, b_offset);
+ fill_quantized((pretranspose_A) ? a_transposed : a, 3 + finfo.hash);
+ fill_quantized((pretranspose_B) ? b_transposed : b, 4 + finfo.hash);
+ }
+
+ return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>((pretranspose_A ? a_transposed : a), (pretranspose_B ? b_transposed : b), shape_output, a_offset, b_offset);
}
+} // namespace
-template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
-class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false>
+class GEMMLowpGenericMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, bool accumulate=false, bool dynamic_qinfo = false)
{
- _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
- _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
+ const auto a_qinfo = QuantizationInfo(1.0f / 255, a_offset);
+ const auto b_qinfo = QuantizationInfo(1.0f / 255, b_offset);
+ TensorFillInfo finfo;
+ _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo);
+ _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate);
}
protected:
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, const bool accumulate, const bool dynamic_qinfo)
{
- return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
+ const auto output_qinfo = QuantizationInfo(); // No output stage
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, DataType::QASYMM8, DataType::QASYMM8, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo);
}
- SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
+ SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate)
{
- return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
+ SimpleTensor<int32_t> ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, uint8_t, uint8_t, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo,
+ DataType::QASYMM8, DataType::QASYMM8, finfo);
+
+ if (accumulate)
+ {
+ SimpleTensor<int32_t> output{ shape_output, DataType::S32, 1 };
+ fill(output, 6 + finfo.hash, finfo.min_output, finfo.max_output);
+ reference::arithmetic_operation<int32_t>(reference::ArithmeticOperation::ADD, output, ref_output, output, ConvertPolicy::SATURATE);
+ return output;
+ }
+
+ return ref_output;
}
TensorType _target{};
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t>
-class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false>
+class GEMMLowpMatrixMultiplyCoreValidationFixture : protected GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>
{
public:
- template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
{
- ARM_COMPUTE_EXPECT(output_stage.type != GEMMLowpOutputStageType::NONE, framework::LogLevel::ERRORS);
- DataType data_type_a = data_type_b == DataType::QASYMM8_SIGNED ? DataType::QASYMM8_SIGNED : DataType::QASYMM8;
+ GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>::setup(shape_a, shape_b, shape_output, a_offset, b_offset, false /* accumulate */);
+ }
+};
- if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
- {
- output_stage.is_quantized_per_channel = true;
- const size_t num_channels = shape_b[0];
- std::vector<float> scales(num_channels);
- std::uniform_real_distribution<> distribution(0, 1);
- library->fill(scales, distribution, 0);
- output_stage.gemmlowp_multipliers.resize(num_channels);
- output_stage.gemmlowp_shifts.resize(num_channels);
- for(size_t i = 0; i < num_channels; ++i)
- {
- quantization::calculate_quantized_multiplier(scales[i], &output_stage.gemmlowp_multipliers[i], &output_stage.gemmlowp_shifts[i]);
- }
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false>
+class GEMMLowpMatrixMultiplyAccumulateValidationFixture : protected GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
+ {
+ GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>::setup(shape_a, shape_b, shape_output, a_offset, b_offset, true /* accumulate */);
+ }
+};
- _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
- _target = compute_target(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
- }
- else
- {
- _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
- _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
- }
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false>
+class GEMMLowpMatrixMultiplyCoreDynamicQuantizationFixture : protected GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
+ {
+ GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>::setup(shape_a, shape_b, shape_output, a_offset, b_offset, false /* accumulate */, true /* dynamic_qinfo */);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t, bool run_twice = false>
+class GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
+{
+public:
+ /** Dynamically initialize the quantization info, choosing values that make
+ * result saturation unlikely.
+ */
+ template <typename T>
+ static void setup_quantization(DataType data_type, const TensorShape& shape_a, const TensorShape& shape_b, QuantizationInfo& a_qinfo, QuantizationInfo& b_qinfo, QuantizationInfo& output_qinfo, TensorFillInfo& finfo)
+ {
+ // This hash is used by the random generators. There may be hash collisions,
+ // but this is intentional: it is a very easy way to make the current random
+ // generation process differ for many test configurations which previously
+ // used the same set of values.
+ finfo.hash = shape_a[0] + shape_a[1] + shape_b[0] + shape_b[1];
+
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + finfo.hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ a_qinfo = QuantizationInfo(scale_lhs, offset_lhs);
+ b_qinfo = QuantizationInfo(scale_rhs, offset_rhs);
+
+ // reinterpret_input_as_3d or reinterpret_output_as_3d can be ignored, as the underlying gemm / matmul computation
+ // is equivalent to a standard 2D one with m-n-k dimensions
+ const int m = shape_a.y();
+ const int n = shape_b.x();
+ const int k = shape_a.x();
+
+ const float bias_fraction = 0.5f; // We enabled is_fused in compute_gemmlowp_target below, thus bias is included
+
+ QuantizationHint q_hint = suggest_matmul_dst_q_info_and_bias(a_qinfo, b_qinfo, m, n, k, data_type, bias_fraction);
+ output_qinfo = q_hint.q_info;
+ finfo.min_bias = q_hint.bias_min;
+ finfo.max_bias = q_hint.bias_max;
+
+ // Both target and reference implementations use negated offsets, i.e.
+ // float_val = (int_val + offset) * scale
+ // instead of
+ // float_val = (int_val - offset) * scale
+ // as usual. Therefore, after calculating the output quantization above, we
+ // negate the inputs' offsets.
+ a_qinfo = QuantizationInfo(scale_lhs, -offset_lhs);
+ b_qinfo = QuantizationInfo(scale_rhs, -offset_rhs);
+ }
+
+ /** Initialize output stage info from quantization info */
+ static Status init_gemmlowp_output_stage_info(
+ DataType data_type,
+ const QuantizationInfo& a_qinfo,
+ const QuantizationInfo& b_qinfo,
+ const QuantizationInfo& output_qinfo,
+ GEMMLowpOutputStageType type,
+ GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(!is_data_type_quantized_asymmetric(data_type));
+
+ const UniformQuantizationInfo aq_unif = a_qinfo.uniform();
+ const UniformQuantizationInfo bq_unif = b_qinfo.uniform();
+ const UniformQuantizationInfo oq_unif = output_qinfo.uniform();
+
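+ // The effective requantization scale (scale_a * scale_b) / scale_out is
+ // encoded as a fixed-point multiplier plus shift for the output stage.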
+ float multiplier = (aq_unif.scale * bq_unif.scale) / oq_unif.scale;
+ int32_t int_multiplier;
+ int32_t shift;
+
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ quantization::calculate_quantized_multiplier(multiplier, &int_multiplier, &shift));
+
+ int32_t type_min = 0;
+ int32_t type_max = 0;
+ std::tie(type_min, type_max) = quantization::get_quantized_asymmetric_output_min_max(output_qinfo, ActivationLayerInfo(), data_type);
+
+ gemmlowp_output_stage_info.gemmlowp_real_multiplier = multiplier;
+ gemmlowp_output_stage_info.gemmlowp_multiplier = int_multiplier;
+ gemmlowp_output_stage_info.gemmlowp_multipliers = { int_multiplier };
+ gemmlowp_output_stage_info.gemmlowp_shift = shift;
+ gemmlowp_output_stage_info.gemmlowp_shifts = { shift };
+ gemmlowp_output_stage_info.gemmlowp_offset = oq_unif.offset;
+ gemmlowp_output_stage_info.type = type;
+ gemmlowp_output_stage_info.gemmlowp_min_bound = type_min;
+ gemmlowp_output_stage_info.gemmlowp_max_bound = type_max;
+
+ return Status{};
+ }
+
+ /** Currently this fixture only tests the following data type configurations:
+ *
+ * 1. a and b are of the same data type
+ * 2. The data type is quantized asymmetric
+ *
+ */
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type,
+ bool reshape_b_only_on_first_run)
+ {
+ ARM_COMPUTE_ASSERT(output_stage_type != GEMMLowpOutputStageType::NONE);
+ ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type));
+
+ // Randomized dynamic quantization: randomize quantization info in a way that ensures no result saturation
+ // most of the time
+ QuantizationInfo a_qinfo;
+ QuantizationInfo b_qinfo;
+ QuantizationInfo output_qinfo;
+ TensorFillInfo finfo;
+ setup_quantization<TI>(data_type, shape_a, shape_b, a_qinfo, b_qinfo, output_qinfo, finfo);
+
+ GEMMLowpOutputStageInfo output_stage;
+ init_gemmlowp_output_stage_info(data_type, a_qinfo, b_qinfo, output_qinfo, output_stage_type, output_stage);
+
+ _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type, data_type, output_stage, finfo);
+ _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, data_type, data_type, output_stage, reshape_b_only_on_first_run, finfo);
}
protected:
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage,
- DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const QuantizationInfo& output_qinfo,
+ DataType data_type_a, DataType data_type_b, const GEMMLowpOutputStageInfo& output_stage, bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo())
{
- return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
- output_stage, data_type_a, data_type_b, b_qinfo);
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true, run_twice>(shape_a, shape_b, shape_output, a_qinfo,
+ b_qinfo, output_qinfo, data_type_a, data_type_b, output_stage, reshape_b_only_on_first_run, finfo);
}
- SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
- GEMMLowpOutputStageInfo output_stage, DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
+ SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
+ DataType data_type_a, DataType data_type_b, const GEMMLowpOutputStageInfo& output_stage, const TensorFillInfo& finfo = TensorFillInfo())
{
- SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW>(shape_a, shape_b, shape_output, a_offset, b_offset, data_type_a, data_type_b, b_qinfo);
+ SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b, finfo);
TensorShape bias_shape(shape_b[0]);
SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
- fill(bias, 2);
+ (run_twice) ? fill(bias, 5 + finfo.hash, finfo.min_bias, finfo.max_bias) : fill(bias, 2 + finfo.hash, finfo.min_bias, finfo.max_bias); // Fill bias with the same seed as the last run of gemmlowp_target
switch(output_stage.type)
{
case GEMMLowpOutputStageType::QUANTIZE_DOWN:
- return reference::gemmlowp_quantize_down_scale<int32_t, TW>(output, bias,
+ return reference::gemmlowp_quantize_down_scale<int32_t, TI>(output, bias,
output_stage.gemmlowp_offset, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
break;
case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
- return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TW>(output, bias,
+ return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TI>(output, bias,
output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
break;
default:
@@ -272,11 +468,78 @@ protected:
SimpleTensor<TI> _reference{};
};
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false>
+class GEMMLowpDequantizedMatrixMultiplyValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, bool accumulate)
+ {
+ const bool dynamic_qinfo = false;
+ const auto a_qinfo = QuantizationInfo(1.0f / 255, a_offset);
+ const auto b_qinfo = QuantizationInfo(5.0f / 255, b_offset);
+ TensorFillInfo finfo;
+ _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo);
+ _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo);
+ }
+
+protected:
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, const bool accumulate, const bool dynamic_qinfo)
+ {
+ const auto output_qinfo = QuantizationInfo();
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo, DataType::F32);
+ }
+
+ SimpleTensor<float> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate, const bool dynamic_qinfo)
+ {
+ QuantizationInfo s32_ref_output_quant_info = QuantizationInfo(a_qinfo.uniform().scale * b_qinfo.uniform().scale, 0, dynamic_qinfo);
+
+ SimpleTensor<int32_t> s32_ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, int8_t, int8_t, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo,
+ DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, finfo);
+ s32_ref_output.quantization_info(s32_ref_output_quant_info);
+
+ SimpleTensor<float> f32_ref_output(s32_ref_output.shape(), DataType::F32);
+ f32_ref_output = reference::dequantization_layer<float, int32_t>(s32_ref_output);
+
+ if (accumulate)
+ {
+ SimpleTensor<float> output{ shape_output, DataType::F32, 1 };
+ fill(output, 6 + finfo.hash, finfo.min_output, finfo.max_output);
+ reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, output, f32_ref_output, output, ConvertPolicy::SATURATE);
+ return output;
+ }
+
+ return f32_ref_output;
+ }
+
+ TensorType _target{};
+ SimpleTensor<float> _reference{};
+};
+
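+// Why the S32 accumulator in the fixture above dequantizes with the product
+// of the input scales (a sketch, ignoring the zero-point corrections that the
+// core kernel folds in):
+//   float_a * float_b ~= (a_int * a_scale) * (b_int * b_scale)
+//                      = (a_int * b_int) * (a_scale * b_scale)
+// which is exactly the QuantizationInfo attached to s32_ref_output.
+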
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t, bool run_twice = false>
+class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type, bool reshape_b_only_on_first_run)
+ {
+ GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice>::setup(shape_a, shape_b,
+ shape_output, output_stage_type, data_type, reshape_b_only_on_first_run);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t, bool run_twice = false>
+class GEMMLowpBatchedMatrixMultiplyCoreFusedOffsetOutputFixture : public GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type, bool reshape_b_only_on_first_run)
+ {
+ GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice>::setup(shape_a, shape_b, shape_output, output_stage_type, data_type, reshape_b_only_on_first_run);
+ }
+};
+
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
{
_target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
@@ -312,27 +575,27 @@ protected:
output_stage_info.output_data_type = DataType::QASYMM8;
output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
c.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
// Fill tensor
fill(AccessorType(a), 0);
if(add_bias)
{
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate bias tensor
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensor
fill(AccessorType(b), 1);
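+
+ // Hedged sketch of the transform under test, per the reference
+ // gemmlowp_quantize_down_scale: each int32 accumulator x becomes
+ //   y = clamp(((x + bias + result_offset) * result_mult_int) >> result_shift, min, max)
+ // with bias = 0 when add_bias is false.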
@@ -378,7 +641,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
{
_target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
@@ -414,27 +676,27 @@ protected:
output_stage_info.output_data_type = DataType::QASYMM8_SIGNED;
output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
c.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
// Fill tensor
fill(AccessorType(a), 0);
if(add_bias)
{
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate bias tensor
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensor
fill(AccessorType(b), 1);
@@ -480,7 +742,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
{
_target = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
@@ -508,27 +769,27 @@ protected:
FunctionType output_stage;
output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
c.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
// Fill tensor
fill(AccessorType(a), 0);
if(add_bias)
{
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate bias tensor
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensor
fill(AccessorType(b), 1);
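+
+ // Hedged sketch of the fixed-point transform under test, per the reference
+ // gemmlowp_quantize_down_scale_by_fixedpoint: each int32 accumulator x becomes
+ //   y = clamp(rounding_shift_right(sat_doubling_high_mul(x + bias, multiplier), shift)
+ //             + result_offset_after_shift, min, max)
+ // i.e. roughly round((x + bias) * multiplier * 2^-31 * 2^-shift) + offset, clamped.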
@@ -575,7 +836,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
{
_target = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
@@ -603,27 +863,27 @@ protected:
FunctionType output_stage;
output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
c.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
// Fill tensor
fill(AccessorType(a), 0);
if(add_bias)
{
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate bias tensor
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensor
fill(AccessorType(b), 1);
@@ -670,7 +930,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(DataType data_type, TensorShape shape, float result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
{
_target = compute_target(data_type, shape, result_real_multiplier, result_offset, min, max, add_bias);
@@ -708,27 +967,27 @@ protected:
FunctionType output_stage;
output_stage.configure(&a, add_bias ? &b : nullptr, &c, info);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
c.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
// Fill tensor
fill(AccessorType(a), 0);
if(add_bias)
{
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate bias tensor
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensor
fill(AccessorType(b), 1);
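+
+ // Hedged sketch, per the reference gemmlowp_quantize_down_scale_by_float:
+ // each int32 accumulator x becomes
+ //   y = clamp(int32(round((x + bias) * result_real_multiplier)) + result_offset, min, max)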
@@ -773,7 +1032,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
{
_target = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
@@ -801,27 +1059,27 @@ protected:
FunctionType output_stage;
output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(c.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
c.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
// Fill tensor
fill(AccessorType(a), 0);
if(add_bias)
{
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate bias tensor
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensor
fill(AccessorType(b), 1);
@@ -864,11 +1122,10 @@ protected:
SimpleTensor<int16_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
bool interleave_rhs, DataType data_type)
{
@@ -934,15 +1191,17 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -951,20 +1210,23 @@ protected:
rhs_reshaped.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
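+
+ // Minimal sketch of the operator-style API exercised above (assuming any
+ // experimental operator with configure(ITensorInfo *...) and run(ITensorPack &)):
+ //   op.configure(src.info(), dst.info(), info); // negotiates shapes/types only
+ //   src.allocator()->allocate();                // the caller owns the memory
+ //   dst.allocator()->allocate();
+ //   ITensorPack pack = { { ACL_SRC, &src }, { ACL_DST, &dst } };
+ //   op.run(pack);                               // tensors are bound at run time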
@@ -1010,11 +1272,10 @@ protected:
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
bool interleave_lhs, bool interleave_rhs, DataType data_type)
{
@@ -1084,15 +1345,17 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+
+ add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -1101,20 +1364,23 @@ protected:
rhs_reshaped.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1162,11 +1428,10 @@ protected:
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
{
@@ -1235,13 +1500,15 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -1249,18 +1516,20 @@ protected:
rhs_reshaped.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1301,11 +1570,372 @@ protected:
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename T, typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType, typename ReduceOperation, typename CastOperation>
+class GEMMLowpMatrixMultiplyReshapedOnlyRHSMMULOutputStageValidationFixture : public framework::Fixture
+{
+public:
+ void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
+ unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, bool broadcast_bias, DataType data_type)
+ {
+ GEMMLowpOutputStageInfo output_stage;
+ output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ output_stage.output_data_type = data_type;
+ output_stage.gemmlowp_multipliers = std::vector<int32_t> { 1 };
+ output_stage.gemmlowp_shifts = std::vector<int32_t> { 1 };
+ output_stage.gemmlowp_multipliers[0] = 1;
+ output_stage.gemmlowp_shifts[0] = 1;
+ output_stage.gemmlowp_offset = 0;
+ constexpr float scale = 0.001f;
+ quantization::calculate_quantized_multiplier(scale, &output_stage.gemmlowp_multipliers[0], &output_stage.gemmlowp_shifts[0]);
+ output_stage.gemmlowp_min_bound = -100;
+ output_stage.gemmlowp_max_bound = 100;
+
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0;
+ lhs_info.k0 = k0;
+
+ GEMMRHSMatrixInfo rhs_info;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.h0 = h0;
+ rhs_info.interleave = interleave_rhs;
+ rhs_info.transpose = transpose_rhs;
+
+ int a_offset = 1;
+ int b_offset = 1;
+
+ // Set the tensor shapes for LHS and RHS matrices
+ const TensorShape lhs_shape(k, m, batch_size);
+ const TensorShape rhs_shape(n, k, batch_size);
+ const TensorShape bias_shape(n,
+ broadcast_bias ? 1 : m,
+ broadcast_bias ? 1 : batch_size);
+
+ _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, output_stage, a_offset, b_offset);
+ if(gemm_validated == true)
+ {
+ _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, output_stage, a_offset, b_offset);
+ }
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::QASYMM8:
+ {
+ // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+ std::uniform_int_distribution<> distribution(1, 254);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ case DataType::QASYMM8_SIGNED:
+ {
+ std::uniform_int_distribution<> distribution(-127, 126);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<> distribution(-10000, 10000);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+ }
+
+ TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, DataType data_type, GEMMLowpOutputStageInfo output_stage, const int a_offset, const int b_offset)
+ {
+ // Create tensors
+ TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, a_offset));
+ TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, b_offset));
+ TensorType bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
+ TensorType dst;
+ TensorType rhs_reshaped;
+
+ const unsigned int M = lhs_shape[1];
+ const unsigned int N = rhs_shape[0];
+ const unsigned int K = lhs_shape[0];
+
+ // Tensors for precomputing sum of lhs rows / rhs columns
+ TensorType vec_sum_rows = create_tensor<TensorType>(TensorShape(M, 1, lhs_shape[2]), DataType::S32, 1);
+ TensorType vec_sum_cols = create_tensor<TensorType>(TensorShape(N, 1, rhs_shape[2]), DataType::S32, 1);
+
+ GEMMKernelInfo gemm_info;
+ gemm_info.m = M;
+ gemm_info.n = N;
+ gemm_info.k = K;
+ gemm_info.lhs_info = lhs_info;
+ gemm_info.rhs_info = rhs_info;
+ gemm_info.output_stage = output_stage;
+ gemm_info.a_offset = a_offset;
+ gemm_info.b_offset = b_offset;
+ // The output tensor will be auto-initialized within the function
+
+ // Create and configure function
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMFunctionType gemm;
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+
+ // If GEMM does not validate, do not configure or run it. Validation checks
+ // whether the device supports this extension; if it does not, the test is
+ // skipped. Running on an unsupported device would fail anyway, because
+ // target and reference would not match.
+ gemm_validated = bool(gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, vec_sum_cols.info(), vec_sum_rows.info(), bias.info()));
+ if(gemm_validated == true)
+ {
+ gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, vec_sum_cols.info(), vec_sum_rows.info(), bias.info());
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+
+ // Allocate tensors
+ lhs.allocator()->allocate();
+ rhs.allocator()->allocate();
+ rhs_reshaped.allocator()->allocate();
+ bias.allocator()->allocate();
+ vec_sum_cols.allocator()->allocate();
+ vec_sum_rows.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!vec_sum_cols.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!vec_sum_rows.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(lhs), 0);
+ fill(AccessorType(rhs), 1);
+ fill(AccessorType(bias), 2);
+
+ TensorType lhs_32 = create_tensor<TensorType>(lhs_shape, DataType::S32, 1);
+ TensorType rhs_32 = create_tensor<TensorType>(rhs_shape, DataType::S32, 1);
+ CastOperation cast_lhs;
+ CastOperation cast_rhs;
+ cast_lhs.configure(&lhs, &lhs_32, ConvertPolicy::SATURATE);
+ cast_rhs.configure(&rhs, &rhs_32, ConvertPolicy::SATURATE);
+ lhs_32.allocator()->allocate();
+ rhs_32.allocator()->allocate();
+ cast_lhs.run();
+ cast_rhs.run();
+
+ ReduceOperation lhs_sum_rows;
+ ReduceOperation rhs_sum_cols;
+
+ lhs_sum_rows.configure(&lhs_32, &vec_sum_rows, 0, ReductionOperation::SUM, false);
+ rhs_sum_cols.configure(&rhs_32, &vec_sum_cols, 1, ReductionOperation::SUM);
+
+ lhs_sum_rows.run();
+ rhs_sum_cols.run();
+
+ // Compute GEMM
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_SRC_2, &bias }, { ACL_DST, &dst }, { ACL_VEC_COL_SUM, &vec_sum_cols }, { ACL_VEC_ROW_SUM, &vec_sum_rows } });
+ gemm.run(gemm_pack);
+ }
+
+ return dst;
+ }
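+
+ // Why vec_sum_rows / vec_sum_cols are precomputed (a sketch in the textbook
+ // zero-point convention; the reference used below negates the offsets, which
+ // turns the subtractions into additions): for an MxK * KxN product,
+ //   C[i][j] = sum_k A[i][k] * B[k][j]
+ //           - b_offset * row_sum(A)[i]
+ //           - a_offset * col_sum(B)[j]
+ //           + a_offset * b_offset * K
+ // so each sum is computed once and reused across the whole output.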
+
+ SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, GEMMLowpOutputStageInfo output_stage,
+ const int a_offset, const int b_offset)
+ {
+ TensorShape dst_shape = lhs_shape;
+ dst_shape[0] = rhs_shape[0];
+ dst_shape[1] = lhs_shape[1];
+
+ // Create reference
+ SimpleTensor<T> lhs{ lhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, a_offset) };
+ SimpleTensor<T> rhs{ rhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, b_offset) };
+ SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
+ SimpleTensor<int32_t> dst{ dst_shape, DataType::S32, 1 };
+ SimpleTensor<T> dst_final{ dst_shape, data_type, 1 };
+
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
+ fill(bias, 2);
+
+ dst = reference::gemmlowp_matrix_multiply_core<int32_t, T>(lhs, rhs, dst_shape, a_offset, b_offset);
+ dst_final = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, T>(dst, bias,
+ output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
+ return dst_final;
+ }
+
+ bool gemm_validated = true;
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
+class GEMMLowpMatrixMultiplyReshapedOnlyRHSMMULValidationFixture : public framework::Fixture
+{
+public:
+ void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
+ unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
+ {
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0;
+ lhs_info.k0 = k0;
+
+ GEMMRHSMatrixInfo rhs_info;
+ rhs_info.n0 = n0;
+ rhs_info.k0 = k0;
+ rhs_info.h0 = h0;
+ rhs_info.interleave = interleave_rhs;
+ rhs_info.transpose = transpose_rhs;
+
+ // Set the tensor shapes for LHS and RHS matrices
+ const TensorShape lhs_shape(k, m, batch_size);
+ const TensorShape rhs_shape(n, k, batch_size);
+
+ _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
+ if(gemm_validated == true)
+ {
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type);
+ }
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::QASYMM8:
+ {
+ // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+ std::uniform_int_distribution<> distribution(1, 254);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ case DataType::QASYMM8_SIGNED:
+ {
+ std::uniform_int_distribution<> distribution(-127, 126);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+ }
+
+ TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
+ {
+ // Create tensors
+ TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
+ TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
+ TensorType rhs_reshaped;
+ TensorType dst;
+
+ const unsigned int M = lhs_shape[1];
+ const unsigned int N = rhs_shape[0];
+ const unsigned int K = lhs_shape[0];
+
+ GEMMKernelInfo gemm_info;
+ gemm_info.m = M;
+ gemm_info.n = N;
+ gemm_info.k = K;
+ gemm_info.lhs_info = lhs_info;
+ gemm_info.rhs_info = rhs_info;
+ // The output tensor will be auto-initialized within the function
+
+ // Create and configure function
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMFunctionType gemm;
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+
+ // If GEMM does not validate, do not configure or run it. Validation checks
+ // whether the device supports this extension; if it does not, the test is
+ // skipped. Running on an unsupported device would fail anyway, because
+ // target and reference would not match.
+ gemm_validated = bool(gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, nullptr, nullptr, nullptr));
+ if(gemm_validated == true)
+ {
+ gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, nullptr, nullptr, nullptr);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+
+ // Allocate tensors
+ lhs.allocator()->allocate();
+ rhs.allocator()->allocate();
+ rhs_reshaped.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(lhs), 0);
+ fill(AccessorType(rhs), 1);
+
+ // Compute GEMM
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
+ }
+
+ return dst;
+ }
+
+ SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
+ {
+ TensorShape dst_shape = lhs_shape;
+ dst_shape[0] = rhs_shape[0];
+ dst_shape[1] = lhs_shape[1];
+
+ if(data_type == DataType::QASYMM8)
+ {
+ // Create reference
+ SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
+ SimpleTensor<int32_t> dst{ dst_shape, DataType::S32, 1 };
+
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
+
+ return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+ }
+ else
+ {
+ // Create reference
+ SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
+ SimpleTensor<int32_t> dst{ dst_shape, DataType::S32, 1 };
+
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
+
+ return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
+ }
+ }
+
+ bool gemm_validated = true;
+ TensorType _target{};
+ SimpleTensor<int32_t> _reference{};
+};
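+
+// Hedged usage sketch of the validate-before-configure pattern in the two
+// MMUL fixtures above (names as in the fixtures; error handling elided):
+//   const Status s = gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(),
+//                                  gemm_info, nullptr, nullptr, nullptr);
+//   if(bool(s)) { gemm.configure(/* same arguments */); /* then run */ }
+//   else        { /* device lacks the extension: skip */ }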
+
+template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
{
@@ -1378,13 +2008,15 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info);
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
// Allocate tensors
lhs.allocator()->allocate();
@@ -1392,18 +2024,20 @@ protected:
rhs_reshaped.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1450,7 +2084,6 @@ template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
{
GEMMLHSMatrixInfo lhs_info;
@@ -1493,26 +2126,29 @@ protected:
// Create and configure function
GEMMFunctionType gemm;
- gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
+ gemm.configure(lhs.info(), rhs.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
+
+ add_padding_x({ &lhs, &rhs, &dst });
// Allocate tensors
lhs.allocator()->allocate();
rhs.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
fill(AccessorType(rhs), 1);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1542,7 +2178,6 @@ template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
{
GEMMLHSMatrixInfo lhs_info;
@@ -1588,26 +2223,29 @@ protected:
// Create and configure function
GEMMFunctionType gemm;
- gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
+ gemm.configure(lhs.info(), rhs.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
+
+ ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
- ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &lhs, &rhs, &dst });
// Allocate tensors
lhs.allocator()->allocate();
rhs.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(lhs), 0);
fill(AccessorType(rhs), 1);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1637,4 +2275,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H
diff --git a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
index 3a5ab7c5e1..d88029f93e 100644
--- a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
+++ b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,11 +46,10 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool reinterpret_input_as_3d = false>
+template <typename TensorType, typename AccessorType, typename OperatorType, typename T, bool reinterpret_input_as_3d = false>
class GEMMReshapeLHSMatrixValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape_in, unsigned int batch_size, DataType data_type, unsigned int m0, unsigned int k0, unsigned int v0, bool interleave, bool transpose)
{
GEMMLHSMatrixInfo lhs_info;
@@ -86,23 +85,26 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- FunctionType gemm_lhs_reshape;
- gemm_lhs_reshape.configure(&src, &dst, lhs_info, reinterpret_input_as_3d);
+ OperatorType gemm_lhs_reshape;
+ gemm_lhs_reshape.configure(src.info(), dst.info(), lhs_info, reinterpret_input_as_3d);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+
+ add_padding_x({ &src, &dst });
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
// Compute GEMM LHS matrix reshape function
- gemm_lhs_reshape.run();
+ ITensorPack tensors = { { ACL_SRC, &src }, { ACL_DST, &dst } };
+ gemm_lhs_reshape.run(tensors);
return dst;
}
diff --git a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
index e03c4f39b8..0929faf04a 100644
--- a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
+++ b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,11 +46,10 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename OperatorType, typename T>
class GEMMReshapeRHSMatrixValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape_in, unsigned int batch_size, DataType data_type, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave, bool transpose)
{
GEMMRHSMatrixInfo rhs_info;
@@ -85,23 +84,26 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- FunctionType gemm_rhs_reshape;
- gemm_rhs_reshape.configure(&src, &dst, rhs_info);
+ OperatorType gemm_rhs_reshape;
+ gemm_rhs_reshape.configure(src.info(), dst.info(), rhs_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+
+ add_padding_x({ &src, &dst });
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
// Compute GEMM RHS matrix reshape function
- gemm_rhs_reshape.run();
+ ITensorPack tensors = { { ACL_SRC, &src }, { ACL_DST, &dst } };
+ gemm_rhs_reshape.run(tensors);
return dst;
}
diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
index 89d2238344..3765515b57 100644
--- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h
+++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class GEMMTranspose1xWValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(size_t x, size_t y, DataType data_type)
{
_data_type = data_type;
@@ -64,9 +63,14 @@ protected:
switch(tensor.data_type())
{
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -84,24 +88,25 @@ protected:
// Create and configure function
FunctionType f;
- f.configure(&a, &b);
+ f.configure(a.info(), b.info());
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
// Allocate tensors
a.allocator()->allocate();
b.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
// Fill tensors
fill(AccessorType(a), 0);
fill(AccessorType(b), 1);
// Compute GEMM function
- f.run();
+ ITensorPack tensors{ { ACL_SRC, &a }, { ACL_DST, &b } };
+ f.run(tensors);
return b;
}
diff --git a/tests/validation/fixtures/GatherFixture.h b/tests/validation/fixtures/GatherFixture.h
index f2dcd4a1a4..857b0387b7 100644
--- a/tests/validation/fixtures/GatherFixture.h
+++ b/tests/validation/fixtures/GatherFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class GatherFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape indices_shape, int axis, DataType data_type)
{
_target = compute_target(input_shape, data_type, axis, indices_shape);
@@ -67,9 +66,11 @@ protected:
std::mt19937 gen(library->seed());
uint32_t *indices_ptr = static_cast<uint32_t *>(indices.data());
- std::uniform_int_distribution<uint32_t> dist_index(0, input_shape[actual_axis] - 1);
- //Let's consider 1D indices
- for(unsigned int ind = 0; ind < indices_shape[0]; ind++)
+ // Roughly 10% of the time the generated index is out of range.
+ uint32_t max_index = input_shape[actual_axis] + input_shape[actual_axis] / 9 + 1;
+ std::uniform_int_distribution<uint32_t> dist_index(0, max_index - 1);
+
+ for(unsigned int ind = 0; ind < indices_shape.total_size(); ind++)
{
indices_ptr[ind] = dist_index(gen);
}
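+
+ // Sanity check of the distribution above (illustrative): for axis length N,
+ // indices are drawn uniformly from [0, N + N/9], so about
+ // (N/9 + 1) / (N + N/9 + 1) ~= 10% of draws fall outside the valid range
+ // [0, N-1], exercising the kernel's out-of-bounds handling.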
@@ -91,18 +92,18 @@ protected:
FunctionType gather;
gather.configure(&src, &indices_tensor, &dst, axis);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(indices_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(indices_tensor.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
indices_tensor.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!indices_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!indices_tensor.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/Gaussian3x3Fixture.h b/tests/validation/fixtures/Gaussian3x3Fixture.h
deleted file mode 100644
index 396e63e73f..0000000000
--- a/tests/validation/fixtures/Gaussian3x3Fixture.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_GAUSSIAN3X3_FIXTURE
-#define ARM_COMPUTE_TEST_GAUSSIAN3X3_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Gaussian3x3.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class Gaussian3x3ValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType gaussian3x3;
- gaussian3x3.configure(&src, &dst, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- gaussian3x3.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::gaussian3x3<T>(src, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GAUSSIAN3X3_FIXTURE */
diff --git a/tests/validation/fixtures/Gaussian5x5Fixture.h b/tests/validation/fixtures/Gaussian5x5Fixture.h
deleted file mode 100644
index 31d7acf0f1..0000000000
--- a/tests/validation/fixtures/Gaussian5x5Fixture.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_GAUSSIAN5X5_FIXTURE
-#define ARM_COMPUTE_TEST_GAUSSIAN5X5_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Gaussian5x5.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class Gaussian5x5ValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType gaussian5x5;
- gaussian5x5.configure(&src, &dst, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- gaussian5x5.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::gaussian5x5<T>(src, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GAUSSIAN5X5_FIXTURE */
diff --git a/tests/validation/fixtures/GaussianPyramidHalfFixture.h b/tests/validation/fixtures/GaussianPyramidHalfFixture.h
deleted file mode 100644
index ef7657a93c..0000000000
--- a/tests/validation/fixtures/GaussianPyramidHalfFixture.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_GAUSSIAN_PYRAMID_HALF_FIXTURE
-#define ARM_COMPUTE_TEST_GAUSSIAN_PYRAMID_HALF_FIXTURE
-
-#include "arm_compute/core/IPyramid.h"
-#include "arm_compute/core/PyramidInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/GaussianPyramidHalf.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename PyramidType>
-class GaussianPyramidHalfValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, BorderMode border_mode, size_t num_levels)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
-
- // Compute target and reference
- compute_target(shape, border_mode, constant_border_value, num_levels);
- compute_reference(shape, border_mode, constant_border_value, num_levels);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- void compute_target(const TensorShape &shape, BorderMode border_mode, uint8_t constant_border_value, size_t num_levels)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, DataType::U8);
-
- PyramidInfo pyramid_info(num_levels, SCALE_PYRAMID_HALF, shape, Format::U8);
- _target.init(pyramid_info);
-
- // Create and configure function
- FunctionType gaussian_pyramid;
-
- gaussian_pyramid.configure(&src, &_target, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- for(size_t i = 0; i < pyramid_info.num_levels(); ++i)
- {
- ARM_COMPUTE_EXPECT(_target.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Allocate input tensor
- src.allocator()->allocate();
-
- // Allocate pyramid
- _target.allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- for(size_t i = 0; i < pyramid_info.num_levels(); ++i)
- {
- ARM_COMPUTE_EXPECT(!_target.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- gaussian_pyramid.run();
- }
-
- void compute_reference(const TensorShape &shape, BorderMode border_mode, uint8_t constant_border_value, size_t num_levels)
- {
- // Create reference
- SimpleTensor<T> src{ shape, DataType::U8 };
-
- // Fill reference
- fill(src);
-
- _reference = reference::gaussian_pyramid_half<T>(src, border_mode, constant_border_value, num_levels);
- }
-
- PyramidType _target{};
- std::vector<SimpleTensor<T>> _reference{};
- BorderMode _border_mode{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GAUSSIAN_PYRAMID_HALF_FIXTURE */
\ No newline at end of file
diff --git a/tests/validation/fixtures/HOGDescriptorFixture.h b/tests/validation/fixtures/HOGDescriptorFixture.h
deleted file mode 100644
index 6097059c84..0000000000
--- a/tests/validation/fixtures/HOGDescriptorFixture.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_HOG_DESCRIPTOR_FIXTURE
-#define ARM_COMPUTE_TEST_HOG_DESCRIPTOR_FIXTURE
-
-#include "arm_compute/core/HOGInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/HOGDescriptor.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename HOGType, typename AccessorType, typename FunctionType, typename T, typename U>
-class HOGDescriptorValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(std::string image, HOGInfo hog_info, Format format, BorderMode border_mode)
- {
- // Only defined borders supported
- ARM_COMPUTE_ERROR_ON(border_mode == BorderMode::UNDEFINED);
-
- // Generate a random constant value
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<T> int_dist(0, 255);
- const T constant_border_value = int_dist(gen);
-
- _target = compute_target(image, format, border_mode, constant_border_value, hog_info);
- _reference = compute_reference(image, format, border_mode, constant_border_value, hog_info);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor, const std::string image, Format format)
- {
- library->fill(tensor, image, format);
- }
-
- template <typename V, typename D>
- void fill(V &&tensor, int i, D max)
- {
- library->fill_tensor_uniform(tensor, i, static_cast<D>(0), max);
- }
-
- TensorType compute_target(const std::string image, Format &format, BorderMode &border_mode, T constant_border_value, const HOGInfo &hog_info)
- {
- // Get image shape for src tensor
- TensorShape shape = library->get_image_shape(image);
-
- // Create tensor info for HOG descriptor
- TensorInfo tensor_info_hog_descriptor(hog_info, shape.x(), shape.y());
-
- // Create HOG
- HOGType hog = create_HOG<HOGType>(hog_info);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type_from_format(format));
- TensorType dst = create_tensor<TensorType>(tensor_info_hog_descriptor.tensor_shape(), DataType::F32, tensor_info_hog_descriptor.num_channels());
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- FunctionType hog_descriptor;
- hog_descriptor.configure(&src, &dst, &hog, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- const T max = std::numeric_limits<T>::max();
-
- // Fill tensors
- fill(AccessorType(src), image, format);
- fill(AccessorType(dst), 1, static_cast<U>(max));
-
- // Compute function
- hog_descriptor.run();
-
- return dst;
- }
-
- SimpleTensor<U> compute_reference(const std::string image, Format format, BorderMode border_mode, T constant_border_value, const HOGInfo &hog_info)
- {
- // Create reference
- SimpleTensor<T> src{ library->get_image_shape(image), data_type_from_format(format) };
-
- // Fill reference
- fill(src, image, format);
-
- return reference::hog_descriptor<U>(src, border_mode, constant_border_value, hog_info);
- }
-
- TensorType _target{};
- SimpleTensor<U> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_HOG_DESCRIPTOR_FIXTURE */
diff --git a/tests/validation/fixtures/HOGDetectorFixture.h b/tests/validation/fixtures/HOGDetectorFixture.h
deleted file mode 100644
index c2d0514d11..0000000000
--- a/tests/validation/fixtures/HOGDetectorFixture.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_HOG_DETECTOR_FIXTURE
-#define ARM_COMPUTE_TEST_HOG_DETECTOR_FIXTURE
-
-#include "arm_compute/core/HOGInfo.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/IHOGAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/fixtures/HOGDescriptorFixture.h"
-#include "tests/validation/reference/HOGDetector.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType,
- typename HOGType,
- typename DetectionWindowArrayType,
- typename HOGDescriptorType,
- typename AccessorType,
- typename ArrayAccessorType,
- typename HOGAccessorType,
- typename HOGDetectorType,
- typename T,
- typename U>
-class HOGDetectorValidationFixture : public HOGDescriptorValidationFixture<TensorType, HOGType, AccessorType, HOGDescriptorType, T, U>
-{
-public:
- template <typename...>
- void setup(Size2D detection_window_stride, std::string image, HOGInfo hog_info, Format format, BorderMode border_mode)
- {
- using HDF = HOGDescriptorValidationFixture<TensorType, HOGType, AccessorType, HOGDescriptorType, T, U>;
- HDF::setup(image, hog_info, format, border_mode);
-
- const unsigned int max_num_detection_windows = 100000;
-
- // Initialise descriptor (linear SVM coefficients).
- // NOTE: Fixed values are used to keep the number of detection windows detected
- // consistent in order to have meaningful validation tolerances.
- // The values are "unbalanced" to reduce the number of detected objects
- std::random_device::result_type seed = 0;
- std::vector<U> descriptor = generate_random_real(hog_info.descriptor_size(), -0.505f, 0.495f, seed);
-
- // Compute target and reference values using feature vector from descriptor kernel
- _target = compute_target(HDF::_target, descriptor, max_num_detection_windows, hog_info, detection_window_stride);
- _reference = compute_reference(HDF::_reference, descriptor, max_num_detection_windows, hog_info, detection_window_stride);
- }
-
-protected:
- std::vector<DetectionWindow> compute_target(const TensorType &src, const std::vector<U> &descriptor, unsigned int max_num_detection_windows,
- const HOGInfo &hog_info, const Size2D &detection_window_stride)
- {
- // Create HOG
- HOGType hog = create_HOG<HOGType>(hog_info);
-
- // Create array of detection windows
- DetectionWindowArrayType detection_windows(max_num_detection_windows);
-
- // Copy HOG descriptor values to HOG memory
- {
- HOGAccessorType hog_accessor(hog);
- std::memcpy(hog_accessor.descriptor(), descriptor.data(), descriptor.size() * sizeof(U));
- }
-
- // Create and configure function
- HOGDetectorType hog_detector;
- hog_detector.configure(&src, &hog, &detection_windows, detection_window_stride);
-
- // Reset detection windows
- detection_windows.clear();
-
- // Compute function
- hog_detector.run();
-
- // Create array of detection windows
- std::vector<DetectionWindow> windows;
-
- // Copy detection windows
- ArrayAccessorType accessor(detection_windows);
-
- for(size_t i = 0; i < accessor.num_values(); i++)
- {
- DetectionWindow win;
- win.x = accessor.at(i).x;
- win.y = accessor.at(i).y;
- win.width = accessor.at(i).width;
- win.height = accessor.at(i).height;
- win.idx_class = accessor.at(i).idx_class;
- win.score = accessor.at(i).score;
-
- windows.push_back(win);
- }
-
- return windows;
- }
-
- std::vector<DetectionWindow> compute_reference(const SimpleTensor<U> &src, const std::vector<U> &descriptor, unsigned int max_num_detection_windows,
- const HOGInfo &hog_info, const Size2D &detection_window_stride)
- {
-        // Assumes default values of zero for threshold and class_idx.
- return reference::hog_detector(src, descriptor, max_num_detection_windows, hog_info, detection_window_stride);
- }
-
- std::vector<DetectionWindow> _target{};
- std::vector<DetectionWindow> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_HOG_DETECTOR_FIXTURE */
diff --git a/tests/validation/fixtures/HOGMultiDetectionFixture.h b/tests/validation/fixtures/HOGMultiDetectionFixture.h
deleted file mode 100644
index 039f3f4b74..0000000000
--- a/tests/validation/fixtures/HOGMultiDetectionFixture.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_HOG_MULTI_DETECTION_FIXTURE
-#define ARM_COMPUTE_TEST_HOG_MULTI_DETECTION_FIXTURE
-
-#include "arm_compute/core/HOGInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/IHOGAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/HOGMultiDetection.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType,
- typename HOGType,
- typename MultiHOGType,
- typename DetectionWindowArrayType,
- typename DetectionWindowStrideType,
- typename AccessorType,
- typename Size2DArrayAccessorType,
- typename DetectionWindowArrayAccessorType,
- typename HOGAccessorType,
- typename FunctionType,
- typename T,
- typename U>
-class HOGMultiDetectionValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(std::string image, std::vector<HOGInfo> models, Format format, BorderMode border_mode, bool non_maxima_suppression)
- {
- // Only defined borders supported
- ARM_COMPUTE_ERROR_ON(border_mode == BorderMode::UNDEFINED);
-
- // Generate a random constant value
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<T> int_dist(0, 255);
- const T constant_border_value = int_dist(gen);
-
- // Initialize descriptors vector
- std::vector<std::vector<U>> descriptors(models.size());
-
- // Use default values for threshold and min_distance
- const float threshold = 0.f;
- const float min_distance = 1.f;
-
- // Maximum number of detection windows per batch
- const unsigned int max_num_detection_windows = 100000;
-
- _target = compute_target(image, format, border_mode, constant_border_value, models, descriptors, max_num_detection_windows, threshold, non_maxima_suppression, min_distance);
- _reference = compute_reference(image, format, border_mode, constant_border_value, models, descriptors, max_num_detection_windows, threshold, non_maxima_suppression, min_distance);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor, const std::string image, Format format)
- {
- library->fill(tensor, image, format);
- }
-
- void initialize_batch(const std::vector<HOGInfo> &models, MultiHOGType &multi_hog,
- std::vector<std::vector<U>> &descriptors, DetectionWindowStrideType &detection_window_strides)
- {
- for(unsigned i = 0; i < models.size(); ++i)
- {
- auto hog_model = reinterpret_cast<HOGType *>(multi_hog.model(i));
- hog_model->init(models[i]);
-
- // Initialise descriptor (linear SVM coefficients).
- std::random_device::result_type seed = 0;
- descriptors.at(i) = generate_random_real(models[i].descriptor_size(), -0.505f, 0.495f, seed);
-
- // Copy HOG descriptor values to HOG memory
- {
- HOGAccessorType hog_accessor(*hog_model);
- std::memcpy(hog_accessor.descriptor(), descriptors.at(i).data(), descriptors.at(i).size() * sizeof(U));
- }
-
- // Initialize detection window stride
- Size2DArrayAccessorType accessor(detection_window_strides);
- accessor.at(i) = models[i].block_stride();
- }
- }
-
- std::vector<DetectionWindow> compute_target(const std::string image, Format &format, BorderMode &border_mode, T constant_border_value,
- const std::vector<HOGInfo> &models, std::vector<std::vector<U>> &descriptors, unsigned int max_num_detection_windows,
- float threshold, bool non_max_suppression, float min_distance)
- {
- MultiHOGType multi_hog(models.size());
- DetectionWindowArrayType detection_windows(max_num_detection_windows);
- DetectionWindowStrideType detection_window_strides(models.size());
-
-        // Resize detection_window_strides for index access
- detection_window_strides.resize(models.size());
-
-        // Initialize MultiHOG and detection windows
- initialize_batch(models, multi_hog, descriptors, detection_window_strides);
-
- // Get image shape for src tensor
- TensorShape shape = library->get_image_shape(image);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type_from_format(format));
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- FunctionType hog_multi_detection;
- hog_multi_detection.configure(&src, &multi_hog, &detection_windows, &detection_window_strides, border_mode, constant_border_value, threshold, non_max_suppression, min_distance);
-
- // Reset detection windows
- detection_windows.clear();
-
- // Allocate tensors
- src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), image, format);
-
- // Compute function
- hog_multi_detection.run();
-
- // Copy detection windows
- std::vector<DetectionWindow> windows;
- DetectionWindowArrayAccessorType accessor(detection_windows);
-
- for(size_t i = 0; i < accessor.num_values(); i++)
- {
- DetectionWindow win;
- win.x = accessor.at(i).x;
- win.y = accessor.at(i).y;
- win.width = accessor.at(i).width;
- win.height = accessor.at(i).height;
- win.idx_class = accessor.at(i).idx_class;
- win.score = accessor.at(i).score;
-
- windows.push_back(win);
- }
-
- return windows;
- }
-
- std::vector<DetectionWindow> compute_reference(const std::string image, Format format, BorderMode border_mode, T constant_border_value,
- const std::vector<HOGInfo> &models, const std::vector<std::vector<U>> &descriptors, unsigned int max_num_detection_windows,
- float threshold, bool non_max_suppression, float min_distance)
- {
- // Create reference
- SimpleTensor<T> src{ library->get_image_shape(image), data_type_from_format(format) };
-
- // Fill reference
- fill(src, image, format);
-
- // NOTE: Detection window stride fixed to block stride
- return reference::hog_multi_detection(src, border_mode, constant_border_value, models, descriptors, max_num_detection_windows, threshold, non_max_suppression, min_distance);
- }
-
- std::vector<DetectionWindow> _target{};
- std::vector<DetectionWindow> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_HOG_MULTI_DETECTION_FIXTURE */
diff --git a/tests/validation/fixtures/HarrisCornersFixture.h b/tests/validation/fixtures/HarrisCornersFixture.h
deleted file mode 100644
index f1d1f2d135..0000000000
--- a/tests/validation/fixtures/HarrisCornersFixture.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_HARRIS_CORNERS_FIXTURE
-#define ARM_COMPUTE_TEST_HARRIS_CORNERS_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/HarrisCornerDetector.h"
-
-namespace arm_compute
-{
-class CLHarrisCorners;
-class NEHarrisCorners;
-
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename ArrayType, typename FunctionType, typename T>
-class HarrisCornersValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(std::string image, int gradient_size, int block_size, BorderMode border_mode, Format format)
- {
- HarrisCornersParameters params = harris_corners_parameters();
-
- _target = compute_target(image, gradient_size, block_size, border_mode, format, params);
- _reference = compute_reference(image, gradient_size, block_size, border_mode, format, params);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, RawTensor raw)
- {
- library->fill(tensor, raw);
- }
-
- ArrayType compute_target(std::string image, int gradient_size, int block_size, BorderMode border_mode, Format format, const HarrisCornersParameters &params)
- {
- // Load the image (cached by the library if loaded before)
- const RawTensor &raw = library->get(image, format);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(raw.shape(), format);
-
- // Create array of keypoints
- ArrayType corners(raw.shape().total_size());
-
- // Create harris corners configure function
- FunctionType harris_corners;
- harris_corners.configure(&src, params.threshold, params.min_dist, params.sensitivity, gradient_size, block_size, &corners, border_mode, params.constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), raw);
-
- // Compute function
- harris_corners.run();
-
- return corners;
- }
-
- std::vector<KeyPoint> compute_reference(std::string image, int gradient_size, int block_size, BorderMode border_mode, Format format, const HarrisCornersParameters &params)
- {
- // Load the image (cached by the library if loaded before)
- const RawTensor &raw = library->get(image, format);
- // Create reference
- SimpleTensor<T> src{ raw.shape(), format };
-
- // Fill reference
- fill(src, raw);
-
- return reference::harris_corner_detector<T>(src, params.threshold, params.min_dist, params.sensitivity, gradient_size, block_size, border_mode, params.constant_border_value);
- }
-
- ArrayType _target{};
- std::vector<KeyPoint> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_HARRIS_CORNERS_FIXTURE */
diff --git a/tests/validation/fixtures/HistogramFixture.h b/tests/validation/fixtures/HistogramFixture.h
deleted file mode 100644
index 7349bdfae2..0000000000
--- a/tests/validation/fixtures/HistogramFixture.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_HISTOGRAM_FIXTURE
-#define ARM_COMPUTE_TEST_HISTOGRAM_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Histogram.h"
-#include "utils/Utils.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename DistributionType>
-class HistogramValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<size_t> distribution_size_t(1, 30);
- const size_t num_bins = distribution_size_t(gen);
- std::uniform_int_distribution<int32_t> distribution_int32_t(0, 125);
- const size_t offset = distribution_int32_t(gen);
- std::uniform_int_distribution<uint32_t> distribution_uint32_t(1, 255 - offset);
- const size_t range = distribution_uint32_t(gen);
-
- _target = compute_target(shape, data_type, num_bins, offset, range);
- _reference = compute_reference(shape, data_type, num_bins, offset, range);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, size_t num_bins, int32_t offset, uint32_t range)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(TensorShape(num_bins), DataType::U32);
- DistributionType distribution_dst(num_bins, offset, range);
-
- // Create and configure function
- FunctionType histogram;
- histogram.configure(&src, &distribution_dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- histogram.run();
-
- // Copy the distribution in a tensor
- arm_compute::utils::map(distribution_dst, true);
- AccessorType accessor_dst = AccessorType(dst);
- uint32_t *dst_data = static_cast<uint32_t *>(accessor_dst.data());
-
- ARM_COMPUTE_EXPECT(accessor_dst.size() <= dst.info()->total_size(), framework::LogLevel::ERRORS);
-
- std::copy_n(distribution_dst.buffer(), num_bins, dst_data);
- arm_compute::utils::unmap(distribution_dst);
- return dst;
- }
-
- SimpleTensor<uint32_t> compute_reference(const TensorShape &shape, DataType data_type, size_t num_bins, int32_t offset, uint32_t range)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::histogram<T>(src, num_bins, offset, range);
- }
-
- TensorType _target{};
- SimpleTensor<uint32_t> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_HISTOGRAM_FIXTURE */
diff --git a/tests/validation/fixtures/Im2ColFixture.h b/tests/validation/fixtures/Im2ColFixture.h
index 809bafd0b2..5c7978f4ab 100644
--- a/tests/validation/fixtures/Im2ColFixture.h
+++ b/tests/validation/fixtures/Im2ColFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,10 +45,9 @@ namespace validation
using namespace arm_compute::misc::shape_calculator;
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool batch_size_on_z>
-class Im2ColValidationFixture : public framework::Fixture
+class Im2ColOpValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, DataType data_type, const Size2D &kernel_dims, const PadStrideInfo &conv_info, const QuantizationInfo &quant_info, const DataLayout &data_layout,
unsigned int num_groups)
{
@@ -88,23 +87,28 @@ protected:
// Create and configure function
FunctionType im2col_func;
- im2col_func.configure(&src, &dst, _kernel_dims, _conv_info, _has_bias, Size2D(1U, 1U), _num_groups);
+ im2col_func.configure(src.info(), dst.info(), _kernel_dims, _conv_info, _has_bias, Size2D(1U, 1U), _num_groups);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
+ arm_compute::ITensorPack pack =
+ {
+ { arm_compute::TensorType::ACL_SRC, &src },
+ { arm_compute::TensorType::ACL_DST, &dst }
+ };
// Compute function
- im2col_func.run();
+ im2col_func.run(pack);
return dst;
}
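
Note on the hunk above: the fixture now exercises the operator-style interface, where configure() takes ITensorInfo pointers and run() takes an ITensorPack binding ACL_SRC/ACL_DST to concrete tensors. A reduced sketch of that calling pattern follows; Op and TensorT are stand-ins for whatever operator and tensor types the fixture is instantiated with, under the assumption that Op has the same configure()/run(ITensorPack &) signature as the FunctionType used here:

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Types.h"

template <typename Op, typename TensorT>
TensorT &run_im2col_op(Op &op, TensorT &src, TensorT &dst, const arm_compute::Size2D &kernel_dims,
                       const arm_compute::PadStrideInfo &conv_info, bool has_bias, unsigned int num_groups)
{
    // Operators are configured from tensor metadata only...
    op.configure(src.info(), dst.info(), kernel_dims, conv_info, has_bias, arm_compute::Size2D(1U, 1U), num_groups);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ...and are bound to concrete memory at run time through a tensor pack.
    arm_compute::ITensorPack pack =
    {
        { arm_compute::TensorType::ACL_SRC, &src },
        { arm_compute::TensorType::ACL_DST, &dst }
    };
    op.run(pack);

    return dst;
}
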
diff --git a/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h b/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h
new file mode 100644
index 0000000000..7374093f51
--- /dev/null
+++ b/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE
+#define ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/Globals.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/IndirectConv2dAddressPrecalculation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AccessorType, typename OperatorType>
+class IndirectConv2dAddressPrecalculationValidationFixture : public framework::Fixture
+{
+public:
+ void setup(unsigned int src_w,
+ unsigned int src_h,
+ unsigned int src_b,
+ unsigned int wei_w,
+ unsigned int wei_h,
+ unsigned int pad,
+ unsigned int stride,
+ unsigned int m0)
+ {
+ DirectConvComputeKernelInfo desc;
+ desc.m0 = m0;
+ desc.n0 = 1; // Not used by the kernel
+ desc.k0 = 1; // Not used by the kernel
+ desc.export_weights_to_cl_image = false; // Not used by the kernel
+
+ const PadStrideInfo conv_info(stride, stride, pad, pad);
+
+ const TensorShape shape_conv_src(23, // The input channels are not used by the kernel
+ src_w,
+ src_h,
+ src_b);
+
+ const TensorShape shape_conv_wei(23, // The input channels are not used by the kernel
+ wei_w,
+ wei_h,
+ 23 // The output channels are not used by the kernel
+ );
+
+        // The result of the kernel does not change with the data type. Hence, we can fix it to F16 for validation purposes
+ const DataType data_type = DataType::F16;
+
+ _target = compute_target(shape_conv_src, shape_conv_wei, data_type, conv_info, desc);
+ _reference = compute_reference(shape_conv_src, shape_conv_wei, data_type, conv_info, desc);
+ }
+
+protected:
+ TensorType compute_target(TensorShape shape_conv_src, TensorShape shape_conv_wei, DataType data_type, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+ {
+ TensorInfo src_conv_info(shape_conv_src, 1, data_type, DataLayout::NHWC);
+ TensorInfo wei_conv_info(shape_conv_wei, 1, data_type, DataLayout::NHWC);
+ TensorType dst;
+
+ // The output tensor will be auto-initialized within the function
+
+ // Create and configure function
+ OperatorType func;
+ func.configure(&src_conv_info, &wei_conv_info, dst.info(), conv_info, desc);
+
+ add_padding_x({ &dst });
+
+ // Allocate tensors
+ dst.allocator()->allocate();
+
+        // Run the indirect conv2d address precalculation function
+ ITensorPack tensors = { { ACL_DST, &dst } };
+ func.run(tensors);
+
+ return dst;
+ }
+
+ SimpleTensor<int32_t> compute_reference(TensorShape shape_conv_src, TensorShape shape_conv_wei, DataType data_type, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+ {
+ ARM_COMPUTE_UNUSED(data_type);
+ TensorShape shape_out = compute_indirect_buffer_shape(shape_conv_src, DataLayout::NHWC, shape_conv_wei, conv_info, desc);
+ TensorShape output_conv_shape = compute_deep_convolution_shape(shape_conv_src, DataLayout::NHWC, shape_conv_wei, conv_info);
+
+ return reference::indirect_conv2d_addr_precalculation(shape_conv_src, shape_conv_wei, output_conv_shape, shape_out, conv_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<int32_t> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE */
\ No newline at end of file
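
Note on the new fixture above: compute_deep_convolution_shape follows the usual convolution geometry, so with PadStrideInfo(stride, stride, pad, pad) each output spatial extent is (in + 2 * pad - kernel) / stride + 1. A self-contained sketch of that arithmetic (values illustrative):

#include <iostream>

// Output extent of a convolution along one spatial dimension for the
// symmetric pad/stride case used by the fixture above.
unsigned int conv_out_dim(unsigned int in, unsigned int kernel, unsigned int pad, unsigned int stride)
{
    return (in + 2 * pad - kernel) / stride + 1;
}

int main()
{
    // e.g. a 32x32 input with 3x3 weights, pad 1, stride 1 keeps a 32x32 output
    std::cout << conv_out_dim(32, 3, 1, 1) << "x" << conv_out_dim(32, 3, 1, 1) << std::endl;
    return 0;
}
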
diff --git a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
index 5e230d4430..c26dd99f02 100644
--- a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class InstanceNormalizationLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place)
{
_target = compute_target(shape, data_type, data_layout, in_place);
@@ -55,7 +54,10 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- std::uniform_real_distribution<> distribution(1.f, 2.f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(1.0f), T(2.0f) };
library->fill(tensor, distribution, 0);
}
@@ -83,10 +85,10 @@ protected:
FunctionType instance_norm_func;
instance_norm_func.configure(&src, in_place ? nullptr : &dst, gamma, beta, epsilon);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
if(!in_place)
{
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
}
// Allocate tensors
@@ -96,10 +98,10 @@ protected:
dst.allocator()->allocate();
}
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
if(!in_place)
{
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
}
// Fill tensors
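
Note on the fill() hunk above (the same pattern recurs in the L2NormalizeLayer and LSTMLayer fixtures below): the distribution type is chosen at compile time because std::uniform_real_distribution is only specified for float, double and long double, so half must go through the library's 16-bit helper. A hedged factoring of the repeated pattern, assuming the fixtures' half type and library object are in scope as in the code above:

#include <random>
#include <type_traits>

template <typename T, typename U>
void fill_uniform(U &&tensor, float lo, float hi, int seed_offset)
{
    static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
    // std::uniform_real_distribution is undefined for non-standard floating
    // point types, so half dispatches to the 16-bit helper instead.
    using DistributionType = typename std::conditional<std::is_same<T, half>::value,
                                                       arm_compute::utils::uniform_real_distribution_16bit<T>,
                                                       std::uniform_real_distribution<T>>::type;

    DistributionType distribution{ T(lo), T(hi) };
    library->fill(tensor, distribution, seed_offset);
}
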
diff --git a/tests/validation/fixtures/IntegralImageFixture.h b/tests/validation/fixtures/IntegralImageFixture.h
deleted file mode 100644
index 8d2149e1fd..0000000000
--- a/tests/validation/fixtures/IntegralImageFixture.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_INTEGRAL_IMAGE_FIXTURE
-#define ARM_COMPUTE_TEST_INTEGRAL_IMAGE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/IntegralImage.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class IntegralImageValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type)
- {
- _target = compute_target(shape);
- _reference = compute_reference(shape, data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, DataType::U8);
- TensorType dst = create_tensor<TensorType>(shape, DataType::U32);
-
- // Create and configure function
- FunctionType integral_image;
- integral_image.configure(&src, &dst);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- integral_image.run();
-
- return dst;
- }
-
- SimpleTensor<uint32_t> compute_reference(const TensorShape &shape, DataType data_type)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- return reference::integral_image<T>(src);
- }
-
- TensorType _target{};
- SimpleTensor<uint32_t> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_INTEGRAL_IMAGE_FIXTURE */
diff --git a/tests/validation/fixtures/L2NormalizeLayerFixture.h b/tests/validation/fixtures/L2NormalizeLayerFixture.h
index e3e1510ff0..b8f4b1eaf3 100644
--- a/tests/validation/fixtures/L2NormalizeLayerFixture.h
+++ b/tests/validation/fixtures/L2NormalizeLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class L2NormalizeLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, DataLayout data_layout, int axis, float epsilon)
{
_target = compute_target(shape, data_type, data_layout, axis, epsilon);
@@ -59,7 +58,10 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- std::uniform_real_distribution<> distribution(1.f, 2.f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(1.0f), T(2.0f) };
library->fill(tensor, distribution, 0);
}
@@ -78,15 +80,15 @@ protected:
FunctionType l2_norm_func;
l2_norm_func.configure(&src, &dst, axis, epsilon);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/LSTMLayerFixture.h b/tests/validation/fixtures/LSTMLayerFixture.h
index 858ee07d3e..a32e9adfe5 100644
--- a/tests/validation/fixtures/LSTMLayerFixture.h
+++ b/tests/validation/fixtures/LSTMLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class LSTMLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape input_weights_shape, TensorShape recurrent_weights_shape, TensorShape cell_bias_shape, TensorShape output_cell_shape, TensorShape output_shape,
TensorShape scratch_shape, ActivationLayerInfo info, float cell_threshold, float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt,
bool use_layer_norm)
@@ -61,13 +60,19 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
template <typename U>
void fill_custom_val(U &&tensor, float num, int i)
{
- std::uniform_real_distribution<> distribution(num, num);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(num), T(num) };
library->fill(tensor, distribution, i);
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &input_weights_shape, const TensorShape &recurrent_weights_shape, const TensorShape &cell_bias_shape,
@@ -161,22 +166,22 @@ protected:
&scratch, &output_state_out, &cell_state_out, &output,
lstm_params, info, cell_threshold, projection_threshold);
- ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(input_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(input_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(input_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(recurrent_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(recurrent_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(recurrent_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(forget_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(cell_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output_state_in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(cell_state_in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(scratch.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output_state_out.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(cell_state_out.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(input_to_forget_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(input_to_cell_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(input_to_output_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(recurrent_to_forget_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(recurrent_to_cell_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(recurrent_to_output_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(forget_gate_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(cell_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output_gate_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output_state_in.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(cell_state_in.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(scratch.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output_state_out.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(cell_state_out.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
// Allocate tensors
input.allocator()->allocate();
@@ -196,22 +201,22 @@ protected:
cell_state_out.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!input_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!input_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!input_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!recurrent_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!recurrent_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!recurrent_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!forget_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!cell_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output_state_in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!cell_state_in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!scratch.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output_state_out.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!cell_state_out.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!input_to_forget_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!input_to_cell_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!input_to_output_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!recurrent_to_forget_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!recurrent_to_cell_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!recurrent_to_output_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!forget_gate_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!cell_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output_gate_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output_state_in.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!cell_state_in.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!scratch.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output_state_out.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!cell_state_out.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
fill(AccessorType(input), 0);
@@ -230,18 +235,18 @@ protected:
if(!cifg_opt)
{
- ARM_COMPUTE_EXPECT(input_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(recurrent_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(cell_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(input_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input_to_input_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(recurrent_to_input_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(cell_to_input_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(input_gate_bias.info()->is_resizable());
input_to_input_w.allocator()->allocate();
recurrent_to_input_w.allocator()->allocate();
cell_to_input_w.allocator()->allocate();
input_gate_bias.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!recurrent_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!cell_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!input_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input_to_input_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!recurrent_to_input_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!cell_to_input_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!input_gate_bias.info()->is_resizable());
fill(AccessorType(input_to_input_w), 13);
fill(AccessorType(recurrent_to_input_w), 14);
if(peephole_opt)
@@ -254,26 +259,26 @@ protected:
if(peephole_opt)
{
- ARM_COMPUTE_EXPECT(cell_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(cell_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(cell_to_forget_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(cell_to_output_w.info()->is_resizable());
cell_to_forget_w.allocator()->allocate();
cell_to_output_w.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!cell_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!cell_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!cell_to_forget_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!cell_to_output_w.info()->is_resizable());
fill(AccessorType(cell_to_forget_w), 18);
fill(AccessorType(cell_to_output_w), 19);
}
if(projection_opt)
{
- ARM_COMPUTE_EXPECT(projection_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(projection_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(projection_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(projection_bias.info()->is_resizable());
projection_w.allocator()->allocate();
projection_bias.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!projection_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!projection_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!projection_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!projection_bias.info()->is_resizable());
fill(AccessorType(projection_w), 20);
fill(AccessorType(projection_bias), 21);
@@ -283,25 +288,25 @@ protected:
{
if(!cifg_opt)
{
- ARM_COMPUTE_EXPECT(input_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input_layer_norm_w.info()->is_resizable());
input_layer_norm_w.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input_layer_norm_w.info()->is_resizable());
fill(AccessorType(input_layer_norm_w), 22);
}
- ARM_COMPUTE_EXPECT(forget_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(cell_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(forget_layer_norm_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(cell_layer_norm_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output_layer_norm_w.info()->is_resizable());
forget_layer_norm_w.allocator()->allocate();
cell_layer_norm_w.allocator()->allocate();
output_layer_norm_w.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!forget_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!cell_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!forget_layer_norm_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!cell_layer_norm_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output_layer_norm_w.info()->is_resizable());
fill(AccessorType(forget_layer_norm_w), 23);
fill(AccessorType(cell_layer_norm_w), 24);
@@ -452,7 +457,6 @@ protected:
}
input_gate = reference::activation_layer(input_gate, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
}
-
// Compute cell_state
SimpleTensor<T> fully_connected_cell_state = reference::fully_connected_layer(input, input_to_cell_w, cell_bias, output_cell_shape);
transposed_weights = reference::transpose(recurrent_to_cell_w);
@@ -468,12 +472,13 @@ protected:
fill(cell_bias, 8);
cell_state_out = reference::arithmetic_operation(reference::ArithmeticOperation::ADD, cell_state_out, cell_bias, data_type, ConvertPolicy::SATURATE);
}
- cell_state_out = reference::activation_layer(cell_state_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
+ cell_state_out = reference::activation_layer(cell_state_out, info);
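+ // The cell-state candidate now uses the layer's configured activation, passed in as 'info'
+ // (typically TANH), instead of a hard-coded LOGISTIC.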
cell_state_out = reference::pixel_wise_multiplication<T, T, T>(cell_state_out, input_gate, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN, data_type);
cell_state_out = reference::arithmetic_operation(reference::ArithmeticOperation::ADD, cell_state_out, pixelwise_mul, data_type, ConvertPolicy::SATURATE);
+
if(cell_threshold != 0.f)
{
- cell_state_out = reference::activation_layer(cell_state_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -cell_threshold, cell_threshold));
+ cell_state_out = reference::activation_layer(cell_state_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, cell_threshold, -cell_threshold));
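+ // For LU_BOUNDED_RELU, ActivationLayerInfo's alpha is the upper clamp bound and beta the
+ // lower one, so clamping the cell state to [-cell_threshold, cell_threshold] requires the
+ // arguments in this order.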
}
// Compute output
@@ -509,7 +514,6 @@ protected:
output_state_out = reference::activation_layer(fully_connected_projection, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold, projection_threshold));
}
}
-
std::vector<SimpleTensor<T>> scratch_inputs;
if(!cifg_opt)
{
diff --git a/tests/validation/fixtures/LaplacianPyramidFixture.h b/tests/validation/fixtures/LaplacianPyramidFixture.h
deleted file mode 100644
index 6344272640..0000000000
--- a/tests/validation/fixtures/LaplacianPyramidFixture.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LAPLACIAN_PYRAMID_FIXTURE
-#define ARM_COMPUTE_TEST_LAPLACIAN_PYRAMID_FIXTURE
-
-#include "arm_compute/core/IPyramid.h"
-#include "arm_compute/core/PyramidInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/LaplacianPyramid.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename U, typename PyramidType>
-class LaplacianPyramidValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, BorderMode border_mode, size_t num_levels, Format format_in, Format format_out)
- {
- std::mt19937 generator(library->seed());
- std::uniform_int_distribution<T> distribution_u8(0, 255);
- const T constant_border_value = distribution_u8(generator);
-
- _pyramid_levels = num_levels;
- _border_mode = border_mode;
-
- _target = compute_target(input_shape, border_mode, constant_border_value, format_in, format_out);
- _reference = compute_reference(input_shape, border_mode, constant_border_value, format_in, format_out);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- PyramidType compute_target(const TensorShape &input_shape, BorderMode border_mode, T constant_border_value,
- Format format_in, Format format_out)
- {
- // Create pyramid
- PyramidType pyramid{};
-
- // Create Pyramid Info
- PyramidInfo pyramid_info(_pyramid_levels, SCALE_PYRAMID_HALF, input_shape, format_out);
-
- // Use conservative padding strategy to fit all subsequent kernels
- pyramid.init_auto_padding(pyramid_info);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, format_in);
-
- // The first two dimensions of the output tensor must match the first
- // two dimensions of the tensor in the last level of the pyramid
- TensorShape dst_shape(input_shape);
- dst_shape.set(0, pyramid.get_pyramid_level(_pyramid_levels - 1)->info()->dimension(0));
- dst_shape.set(1, pyramid.get_pyramid_level(_pyramid_levels - 1)->info()->dimension(1));
-
- // The lowest resolution tensor necessary to reconstruct the input
- // tensor from the pyramid.
- _dst_target = create_tensor<TensorType>(dst_shape, format_out);
-
- // Create and configure function
- FunctionType laplacian_pyramid;
- laplacian_pyramid.configure(&src, &pyramid, &_dst_target, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(_dst_target.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- _dst_target.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!_dst_target.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- pyramid.allocate();
-
- for(size_t i = 0; i < pyramid_info.num_levels(); ++i)
- {
- ARM_COMPUTE_EXPECT(!pyramid.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- laplacian_pyramid.run();
-
- return pyramid;
- }
-
- std::vector<SimpleTensor<U>> compute_reference(const TensorShape &shape, BorderMode border_mode, T constant_border_value,
- Format format_in, Format format_out)
- {
- // Create reference
- SimpleTensor<T> src{ shape, format_in };
-
- // The first two dimensions of the output tensor must match the first
- // two dimensions of the tensor in the last level of the pyramid
- TensorShape dst_shape(shape);
- dst_shape.set(0, static_cast<float>(shape[0] + 1) / static_cast<float>(std::pow(2, _pyramid_levels - 1)));
- dst_shape.set(1, static_cast<float>(shape[1] + 1) / static_cast<float>(std::pow(2, _pyramid_levels - 1)));
-
- _dst_reference = SimpleTensor<U>(dst_shape, format_out);
-
- // Fill reference
- fill(src);
-
- return reference::laplacian_pyramid<T, U>(src, _dst_reference, _pyramid_levels, border_mode, constant_border_value);
- }
-
- size_t _pyramid_levels{};
- BorderMode _border_mode{};
- SimpleTensor<U> _dst_reference{};
- TensorType _dst_target{};
- PyramidType _target{};
- std::vector<SimpleTensor<U>> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LAPLACIAN_PYRAMID_FIXTURE */
diff --git a/tests/validation/fixtures/LaplacianReconstructFixture.h b/tests/validation/fixtures/LaplacianReconstructFixture.h
deleted file mode 100644
index dfa302397f..0000000000
--- a/tests/validation/fixtures/LaplacianReconstructFixture.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LAPLACIAN_RECONSTRUCT_FIXTURE
-#define ARM_COMPUTE_TEST_LAPLACIAN_RECONSTRUCT_FIXTURE
-
-#include "arm_compute/core/IPyramid.h"
-#include "arm_compute/core/PyramidInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/fixtures/LaplacianPyramidFixture.h"
-#include "tests/validation/reference/LaplacianPyramid.h"
-#include "tests/validation/reference/LaplacianReconstruct.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename LaplacianPyramidType, typename T, typename U, typename PyramidType>
-class LaplacianReconstructValidationFixture : public LaplacianPyramidValidationFixture<TensorType, AccessorType, LaplacianPyramidType, U, T, PyramidType>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, BorderMode border_mode, size_t num_levels, Format format_in, Format format_out)
- {
- std::mt19937 generator(library->seed());
- std::uniform_int_distribution<U> distribution_u8(0, 255);
- const U constant_border_value = distribution_u8(generator);
-
- using LPF = LaplacianPyramidValidationFixture<TensorType, AccessorType, LaplacianPyramidType, U, T, PyramidType>;
- LPF::setup(input_shape, border_mode, num_levels, format_out, format_in);
-
- // Compute target and reference values using the pyramid and lowest
- // resolution tensor output from Laplacian Pyramid kernel
- _target = compute_target(input_shape, LPF::_target, LPF::_dst_target, border_mode, constant_border_value);
- _reference = compute_reference(LPF::_reference, LPF::_dst_reference, border_mode, constant_border_value);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &input_shape, PyramidType &pyramid, TensorType &low_res, BorderMode border_mode, U constant_border_value)
- {
- // Create tensors
- TensorType dst = create_tensor<TensorType>(input_shape, DataType::U8);
-
- // Create and configure function
- FunctionType laplacian_reconstruct;
- laplacian_reconstruct.configure(&pyramid, &low_res, &dst, border_mode, constant_border_value);
-
- // Allocate tensors
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Compute function
- laplacian_reconstruct.run();
-
- return dst;
- }
-
- SimpleTensor<U> compute_reference(const std::vector<SimpleTensor<T>> &pyramid,
- const SimpleTensor<T> &low_res, BorderMode border_mode, U constant_border_value)
- {
- return reference::laplacian_reconstruct<T, U>(pyramid, low_res, border_mode, constant_border_value);
- }
-
- TensorType _target{};
- SimpleTensor<U> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LAPLACIAN_RECONSTRUCT_FIXTURE */
diff --git a/tests/validation/fixtures/LocallyConnectedFixture.h b/tests/validation/fixtures/LocallyConnectedFixture.h
deleted file mode 100644
index f87e6e470c..0000000000
--- a/tests/validation/fixtures/LocallyConnectedFixture.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LOCALLY_CONNECTED_FIXTURE
-#define ARM_COMPUTE_TEST_LOCALLY_CONNECTED_FIXTURE
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/LocallyConnected.h"
-#include "tests/validation/reference/Utils.h"
-
-#include <random>
-
-namespace arm_compute
-{
-class NELocallyConnected;
-
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class LocallyConnectedValidationFixture : public framework::Fixture
-{
-public:
- using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
-
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type)
- {
- ARM_COMPUTE_UNUSED(dilation);
-
- _data_type = data_type;
- _bias_data_type = data_type;
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, i);
- }
-
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
- {
- TensorShape reshaped_weights_shape(weights_shape);
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, _data_type);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type);
- TensorType dst = create_tensor<TensorType>(output_shape, _data_type);
-
- // Create and configure function
- FunctionType locally_connected;
- locally_connected.configure(&src, &weights, &bias, &dst, info);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- bias.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(weights), 1);
- fill(AccessorType(bias), 2);
-
- locally_connected.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
- {
- // Create reference
- SimpleTensor<T> src(input_shape, _data_type);
- SimpleTensor<T> weights(weights_shape, _data_type);
- SimpleTensor<TBias> bias(bias_shape, _bias_data_type);
-
- // Fill reference
- fill(src, 0);
- fill(weights, 1);
- fill(bias, 2);
-
- return reference::locally_connected<T>(src, weights, bias, output_shape, info);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- DataType _data_type{};
- DataType _bias_data_type{};
-};
-
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LOCALLY_CONNECTED_FIXTURE */
diff --git a/tests/validation/fixtures/LogicalFixture.h b/tests/validation/fixtures/LogicalFixture.h
new file mode 100644
index 0000000000..60dc963ba7
--- /dev/null
+++ b/tests/validation/fixtures/LogicalFixture.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_LOGICAL_FIXTURE
+#define ARM_COMPUTE_TEST_LOGICAL_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/Logical.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogicalOperationValidationFixtureBase : public framework::Fixture
+{
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ constexpr auto zero = static_cast<uint8_t>(0);
+ constexpr auto one = static_cast<uint8_t>(0x1);
+ constexpr auto mixed = static_cast<uint8_t>(0xAA);
+ constexpr auto mixed_bitwise_not = static_cast<uint8_t>((~0xAA));
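+ // The two value sets below are consumed pair-wise, covering the full boolean truth table
+ // ((0,0), (1,0), (0,1), (1,1)) plus mixed non-zero bytes, so the operators are verified to
+ // treat any non-zero value as logical true.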
+
+ library->fill_static_values(tensor, i == 0 ?
+ std::vector<uint8_t> { zero, one, zero, one, mixed, zero, mixed } :
+ std::vector<uint8_t> { zero, zero, one, one, zero, mixed, mixed_bitwise_not });
+ }
+
+ void allocate_tensors(std::initializer_list<TensorType *> tensors)
+ {
+ for(auto t : tensors)
+ {
+ ARM_COMPUTE_ASSERT(t->info()->is_resizable());
+ t->allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!t->info()->is_resizable());
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename T>
+using LogicalBinaryRefFunctionPtrType = SimpleTensor<T>(const SimpleTensor<T> &, const SimpleTensor<T> &);
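+// Binding the reference function as a non-type template parameter lets the binary logical
+// fixtures share one body and differ only in the reference they validate against (see the
+// LogicalOr/LogicalAnd aliases below).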
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, LogicalBinaryRefFunctionPtrType<T> RefFunction>
+class LogicalBinaryOperationValidationFixture : public LogicalOperationValidationFixtureBase<TensorType, AccessorType, FunctionType, T>
+{
+ using Parent = LogicalOperationValidationFixtureBase<TensorType, AccessorType, FunctionType, T>;
+
+public:
+ void setup(TensorShape shape0, TensorShape shape1)
+ {
+ Parent::_target = compute_target(shape0, shape1);
+ Parent::_reference = compute_reference(shape0, shape1);
+ }
+
+private:
+ TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1)
+ {
+ TensorType src0 = create_tensor<TensorType>(shape0, _data_type);
+ TensorType src1 = create_tensor<TensorType>(shape1, _data_type);
+ TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), _data_type);
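+ // The destination shape is the broadcast of the two input shapes, so test cases with
+ // shape0 != shape1 also exercise the broadcasting path.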
+
+ FunctionType logical_binary_op;
+
+ logical_binary_op.configure(&src0, &src1, &dst);
+
+ Parent::allocate_tensors({ &src0, &src1, &dst });
+
+ Parent::fill(AccessorType(src0), 0);
+ Parent::fill(AccessorType(src1), 1);
+
+ logical_binary_op.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1)
+ {
+ // Create reference
+ SimpleTensor<T> src0{ shape0, _data_type };
+ SimpleTensor<T> src1{ shape1, _data_type };
+
+ // Fill reference
+ Parent::fill(src0, 0);
+ Parent::fill(src1, 1);
+
+ return RefFunction(src0, src1);
+ }
+
+ static constexpr auto _data_type = DataType::U8;
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+using LogicalOrValidationFixture = LogicalBinaryOperationValidationFixture<TensorType, AccessorType, FunctionType, T, &reference::logical_or<T>>;
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+using LogicalAndValidationFixture = LogicalBinaryOperationValidationFixture<TensorType, AccessorType, FunctionType, T, &reference::logical_and<T>>;
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogicalNotValidationFixture : public LogicalOperationValidationFixtureBase<TensorType, AccessorType, FunctionType, T>
+{
+ using Parent = LogicalOperationValidationFixtureBase<TensorType, AccessorType, FunctionType, T>;
+
+public:
+ void setup(TensorShape shape, DataType data_type)
+ {
+ Parent::_target = compute_target(shape, data_type);
+ Parent::_reference = compute_reference(shape, data_type);
+ }
+
+private:
+ TensorType compute_target(const TensorShape &shape, DataType data_type)
+ {
+ TensorType src = create_tensor<TensorType>(shape, data_type);
+ TensorType dst = create_tensor<TensorType>(shape, data_type);
+
+ FunctionType logical_not;
+
+ logical_not.configure(&src, &dst);
+
+ Parent::allocate_tensors({ &src, &dst });
+
+ Parent::fill(AccessorType(src), 0);
+
+ logical_not.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
+ {
+ // Create reference
+ SimpleTensor<T> src{ shape, data_type };
+
+ // Fill reference
+ Parent::fill(src, 0);
+
+ return reference::logical_not<T>(src);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_LOGICAL_FIXTURE */
diff --git a/tests/validation/fixtures/MagnitudeFixture.h b/tests/validation/fixtures/MagnitudeFixture.h
deleted file mode 100644
index 0930fb4117..0000000000
--- a/tests/validation/fixtures/MagnitudeFixture.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_MAGNITUDE_FIXTURE
-#define ARM_COMPUTE_TEST_MAGNITUDE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Magnitude.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class MagnitudeValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, Format format, MagnitudeType magnitude_type)
- {
- _target = compute_target(shape, format, magnitude_type);
- _reference = compute_reference(shape, format, magnitude_type);
- _magnitude_type = magnitude_type;
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, std::random_device::result_type seed_offset)
- {
- library->fill_tensor_uniform(tensor, seed_offset);
- }
-
- TensorType compute_target(const TensorShape &shape, Format format, MagnitudeType magnitude_type)
- {
- DataType data_type = data_type_from_format(format);
-
- // Create tensors
- TensorType src1 = create_tensor<TensorType>(shape, data_type);
- src1.info()->set_format(format);
-
- TensorType src2 = create_tensor<TensorType>(shape, data_type);
- src2.info()->set_format(format);
-
- TensorType dst = create_tensor<TensorType>(shape, data_type);
- dst.info()->set_format(format);
-
- // Create and configure function
- FunctionType magnitude;
- magnitude.configure(&src1, &src2, &dst, magnitude_type);
-
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src1.allocator()->allocate();
- src2.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src1), 0);
- fill(AccessorType(src2), 1);
-
- // Compute function
- magnitude.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, Format format, MagnitudeType magnitude_type)
- {
- DataType data_type = data_type_from_format(format);
-
- // Create reference
- SimpleTensor<T> src1{ shape, data_type };
- SimpleTensor<T> src2{ shape, data_type };
-
- // Fill reference
- fill(src1, 0);
- fill(src2, 1);
-
- return reference::magnitude<T>(src1, src2, magnitude_type);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- MagnitudeType _magnitude_type{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MAGNITUDE_FIXTURE */
diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h
new file mode 100644
index 0000000000..ffd12e56d0
--- /dev/null
+++ b/tests/validation/fixtures/MatMulFixture.h
@@ -0,0 +1,612 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
+#include "src/core/utils/quantization/AsymmHelpers.h"
+#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/GEMM.h"
+#include "tests/validation/reference/GEMMLowp.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/reference/ReshapeLayer.h"
+#include "tests/validation/Validation.h"
+
+#include <limits>
+#include <random>
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulGenericValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ Settings settings,
+ QuantizationInfo a_qinfo = QuantizationInfo(),
+ QuantizationInfo b_qinfo = QuantizationInfo(),
+ QuantizationInfo o_qinfo = QuantizationInfo())
+ {
+ // For brevity, the input shapes are assumed to be non-transposed for both the a and b matrices.
+ if (transpose_a)
+ {
+ permute(shape_a, PermutationVector(1U, 0U));
+ }
+ if (transpose_b)
+ {
+ permute(shape_b, PermutationVector(1U, 0U));
+ }
+
+ _target = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info,
+ num_extra_runs, settings, a_qinfo, b_qinfo, o_qinfo);
+ _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info,
+ a_qinfo, b_qinfo, o_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::BFLOAT16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{float(lo), float(hi)};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{float(lo), float(hi)};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(lo, hi);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ {
+ library->fill_tensor_uniform(tensor, i);
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported data type.");
+ }
+ }
+ }
+
+ virtual TensorType compute_target(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ const Settings &settings,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
+ {
+ // 1. Create Classes and configure function
+ // ----------------------------------------------------
+ // Create tensors
+ // Configure relevant classes and matmul function
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, b_qinfo);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, o_qinfo);
+
+ FunctionType matmul;
+
+ // Configure MatMulInfo class
+ MatMulInfo mm_info;
+ mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);
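+ // adj_lhs/adj_rhs tell the operator that the corresponding operand arrives transposed;
+ // setup() permutes the input shapes accordingly, so the internal transpose path is exercised.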
+
+ // Ensure values are dynamic
+ a.info()->set_are_values_constant(false);
+ b.info()->set_are_values_constant(false);
+
+ // Configure operator
+ matmul.configure(&a, &b, &dst, mm_info, settings, act_info);
+
+ // Assertions
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // For multiple runs.
+ for (int i = 0; i < num_extra_runs; i++)
+ {
+ // Stress dynamic tensors by running multiple times.
+ // --------------------------------------------------------
+ // Fill tensors with new seed
+ // Run function
+ const int seed_offset = (i + 1) * 100; // use a fresh seed for every extra run
+ fill(AccessorType(a), seed_offset);
+ fill(AccessorType(b), seed_offset + 1);
+
+ matmul.run();
+ }
+
+ // 2. Final Run for reference comparison
+ // --------------------------------------------------------
+ // Re-fill tensors with the same seeds as the reference run
+ // Compute MatMul operation
+ fill(AccessorType(a), 2);
+ fill(AccessorType(b), 3);
+
+ matmul.run();
+
+ return dst;
+ }
+
+ template <typename TT>
+ typename std::enable_if < !std::is_integral<TT>::value, SimpleTensor<TT >>::type
+ compute_reference_gemm(const SimpleTensor<TT> &a,
+ const SimpleTensor<TT> &b,
+ const SimpleTensor<TT> &c,
+ float alpha,
+ float beta,
+ const QuantizationInfo &o_qinfo)
+ {
+ ARM_COMPUTE_UNUSED(o_qinfo);
+
+ return reference::gemm(a, b, c, alpha, beta);
+ }
+
+ template <typename TT>
+ typename std::enable_if<std::is_integral<TT>::value, SimpleTensor<TT>>::type
+ compute_reference_gemm(const SimpleTensor<TT> &a,
+ const SimpleTensor<TT> &b,
+ const SimpleTensor<TT> &c,
+ float alpha,
+ float beta,
+ const QuantizationInfo &o_qinfo)
+ {
+ ARM_COMPUTE_UNUSED(alpha, beta);
+
+ const auto aq = a.quantization_info().uniform();
+ const auto bq = b.quantization_info().uniform();
+ const auto oq = o_qinfo.uniform();
+
+ const auto multiplier = aq.scale * bq.scale / oq.scale;
+
+ int32_t output_multiplier = 0;
+ int32_t output_shift = 0;
+ quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
+ std::vector<int32_t> output_multipliers{output_multiplier};
+ std::vector<int32_t> output_shifts{output_shift};
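+ // The float rescale factor (a_scale * b_scale / out_scale) is decomposed into a fixed-point
+ // multiplier and shift so the reference mirrors the kernel's integer-only arithmetic.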
+
+ // The lhs and rhs offsets are negated here to keep the reference aligned with the
+ // function implementation, where the lhs and rhs offsets are also negated.
+ const auto tmp = reference::gemmlowp_matrix_multiply_core<int32_t>(a, b, c.shape(), -aq.offset, -bq.offset);
+
+ auto output = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TT>(
+ tmp, output_multipliers, output_shifts, oq.offset, std::numeric_limits<int32_t>::lowest(),
+ std::numeric_limits<int32_t>::max());
+ output.quantization_info(o_qinfo);
+
+ return output;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &a_shape,
+ const TensorShape &b_shape,
+ const TensorShape &output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
+ {
+ // We collapse dimensions > 2 onto dimension 2, i.e. 4D+ tensors will look like 3D
+ // This is necessary unless we choose to extend gemm reference for 4D+ tensors
+ TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ);
+ TensorShape a_shape_collapsed = a_shape.collapsed_from(Window::DimZ);
+ TensorShape b_shape_collapsed = b_shape.collapsed_from(Window::DimZ);
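+ // e.g. TensorShape(8, 4, 2, 3) collapses to TensorShape(8, 4, 6); the result is reshaped
+ // back to the original output shape at the end of this function.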
+
+ // Create reference
+ SimpleTensor<T> a{a_shape_collapsed, data_type, 1, a_qinfo};
+ SimpleTensor<T> b{b_shape_collapsed, data_type, 1, b_qinfo};
+ SimpleTensor<T> c{output_shape_collapsed, data_type, 1};
+
+ // Fill reference
+ fill(a, 2);
+ fill(b, 3);
+
+ /* Note: Assuming the usual batch matmul dimensions A = (B x M x K) and B = (B x K x N): if transpose_a is
+ set to true, A is expected as (B x K x M) and must be pre-transposed before being passed to the fixture.
+ The fixture then transposes it back to (B x M x K) so that the reference implementation, which expects
+ (B x M x K) input, can be called. Similarly, if transpose_b is set to true, B is expected as (B x N x K)
+ and must be pre-transposed before being passed to the fixture. */
+
+ // Define transposed shapes
+ TensorShape a_transposed_shape(a.shape());
+ a_transposed_shape.set(0, a.shape().y());
+ a_transposed_shape.set(1, a.shape().x());
+
+ TensorShape b_transposed_shape(b.shape());
+ b_transposed_shape.set(0, b.shape().y());
+ b_transposed_shape.set(1, b.shape().x());
+
+ // Define transposed tensors
+ SimpleTensor<T> a_transposed{a_transposed_shape, data_type};
+ SimpleTensor<T> b_transposed{b_transposed_shape, data_type};
+
+ // Pre-transpose a if necessary
+ if (transpose_a)
+ {
+ a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
+ }
+ // Pre-transpose b if necessary
+ if (transpose_b)
+ {
+ b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
+ }
+
+ // Setting beta to 0 will effectively disable C for the
+ // computation of the reference: alpha * A * B + 0 * C
+ // Use transposed tensors if boolean enabled else use original tensors
+ auto result = compute_reference_gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c,
+ 1.0f, 0.f, o_qinfo);
+
+ result = reference::activation_layer<T>(result, act_info, o_qinfo);
+
+ // We reshape the gemm output back if the tensor is high dimensional
+ if (output_shape_collapsed != output_shape)
+ {
+ result = reference::reshape_layer(result, output_shape);
+ }
+
+ return result;
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+/// TODO: (ONCPUML-1451) The current state of this fixture is interim; a longer-term testing method will be implemented later.
+/// @note Currently only a 2x2 test is supported, due to the lack of a reorder reference implementation.
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulFixedFormatFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ TensorType compute_target(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ const Settings &settings,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo) override
+ {
+ // 1. Create Classes and configure function
+ // ----------------------------------------------------
+ // Create tensors
+ // Configure relevant classes and matmul function
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, b_qinfo);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, o_qinfo);
+
+ const auto weight_tensor_info = TensorInfo(*b.info());
+ const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info);
+ TensorType weights_transformed = create_tensor<TensorType>(new_tensor_info);
+
+ // Configure MatMulInfo class
+ MatMulInfo mm_info;
+ mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);
+
+ // Ensure values are dynamic
+ a.info()->set_are_values_constant(false);
+ b.info()->set_are_values_constant(false);
+ weights_transformed.info()->set_are_values_constant(false);
+
+ FunctionType matmul;
+
+ // Configure operator
+ matmul.configure(&a, &weights_transformed, &dst, mm_info, settings, act_info);
+
+ // Assertions
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights_transformed.info()->is_resizable());
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ dst.allocator()->allocate();
+ weights_transformed.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights_transformed.info()->is_resizable());
+
+ // For multiple runs.
+ for (int i = 0; i < num_extra_runs; i++)
+ {
+ // Stress dynamic tensors by running multiple times.
+ // --------------------------------------------------------
+ // Fill tensors with new seed
+ // Run function
+ const int seed_offset = (i + 1) * 100; // use a fresh seed for every extra run
+ this->fill(AccessorType(a), seed_offset);
+ this->fill(AccessorType(b), seed_offset + 1);
+
+ matmul.run();
+ }
+
+ // 2. Final Run for reference comparison
+ // --------------------------------------------------------
+ // Re-fill tensors with the same seeds as the reference run
+ // Compute MatMul operation
+ this->fill(AccessorType(a), 2);
+ this->fill(AccessorType(b), 3);
+
+ rearrange_data(AccessorType(b), AccessorType(weights_transformed));
+
+ matmul.run();
+
+ return dst;
+ }
+
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ Settings settings,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
+ {
+ if (CPUInfo::get().has_bf16())
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings,
+ a_qinfo, b_qinfo, o_qinfo);
+ }
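+ // Assumption: when BF16 is unavailable, the base setup is skipped and _target/_reference
+ // stay default-constructed, so the test degrades to a no-op on such hardware.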
+ }
+
+private:
+ TensorInfo prepare_weights(const TensorInfo &tensor_info)
+ {
+ const DataLayout data_layout = tensor_info.data_layout();
+ ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS);
+ const DataType data_type = tensor_info.data_type();
+ const TensorShape tensor_shape = tensor_info.tensor_shape();
+ const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
+ const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
+ ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS);
+
+ arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes();
+ strides_in_bytes.set(1, 32);
+ strides_in_bytes.set(2, 32);
+
+ const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes();
+ const size_t total_size_in_bytes = 32;
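+ // Assumption: the hard-coded 32-byte strides and total size only hold for the 2x2-or-smaller
+ // blocks asserted above; a general reorder would derive them from the block geometry.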
+
+ const TensorShape TS(H, W);
+
+ TensorInfo new_tensor_info = tensor_info;
+ new_tensor_info.init(TS, tensor_info.num_channels(), data_type, strides_in_bytes, offset_first_element_in_bytes,
+ total_size_in_bytes);
+
+ return new_tensor_info;
+ }
+
+ void rearrange_data(const AccessorType src, AccessorType dst)
+ {
+ const TensorShape src_tensor_shape = src.shape();
+ const DataLayout data_layout = src.data_layout();
+ ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS);
+ const unsigned int O =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
+ const unsigned int H =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
+ const unsigned int W =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
+ const unsigned int I =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
+ ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(I == 1 && O == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS);
+
+ const T *src_ptr = reinterpret_cast<const T *>(src.data());
+ T *dst_ptr = reinterpret_cast<T *>(dst.data());
+
+ // rearrange indexes for 2x2 input and weight
+ int dst_idx[] = {0, 4, 1, 5};
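+ // A row-major 2x2 block [a b; c d] is stored as {a, c, _, _, b, d}: column elements become
+ // adjacent, matching the fixed-format layout implied by the strides set in prepare_weights().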
+ for (int i = 0; i < 4; i++)
+ {
+ dst_ptr[dst_idx[i]] = src_ptr[i];
+ }
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0, Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithDynamicTensorsFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class QuantizedMatMulValidationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+ a_qinfo, b_qinfo, o_qinfo);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithActivationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithActivationAlphaBetaFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo::ActivationFunction function,
+ float alpha_beta)
+ {
+ ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class QuantizedMatMulValidationWithActivationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo::ActivationFunction function,
+ float alpha_beta,
+ int num_extra_runs,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
+ {
+ ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+ a_qinfo, b_qinfo, o_qinfo);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H
diff --git a/tests/validation/fixtures/MatMulKernelFixture.h b/tests/validation/fixtures/MatMulKernelFixture.h
new file mode 100644
index 0000000000..26072dff65
--- /dev/null
+++ b/tests/validation/fixtures/MatMulKernelFixture.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/GEMM.h"
+#include "tests/validation/reference/GEMMLowp.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/reference/ReshapeLayer.h"
+#include <cmath>
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::opencl::kernels;
+
+template <typename T, typename KernelType, bool use_mmul = false>
+class MatMulKernelGenericValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type,
+ bool enable_bias)
+ {
+        // This hash is used to seed the random generators. Hash collisions are
+        // possible but intentional: they are an easy way to make the random
+        // generation differ across test configurations that previously used
+        // the same set of values.
+ _hash = M0 + N0 + K0 + shape_a[0] + shape_a[1] + shape_b[0] + shape_b[1] + enable_bias + export_rhs_to_cl_image;
+
+ // Flag to create a bias
+ _enable_bias = enable_bias;
+
+        // For simplicity, the input shapes are assumed to be non-transposed for both the Lhs and Rhs matrices.
+ QuantizationInfo lhs_q_info;
+ QuantizationInfo rhs_q_info;
+ QuantizationInfo dst_q_info;
+
+ if(is_data_type_quantized(data_type))
+ {
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ lhs_q_info = QuantizationInfo(scale_lhs, offset_lhs);
+ rhs_q_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ const int m = shape_a.y();
+ const int n = shape_b.x();
+ const int k = shape_a.x();
+
+ const float bias_fraction = enable_bias ? 0.5f : 0.f;
+
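+            // The helper below is assumed to return a suggested destination QuantizationInfo
+            // together with a [bias_min, bias_max] range that avoids saturating the int32
+            // accumulator for the given m, n, k and bias_fraction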
+ QuantizationHint q_hint = suggest_matmul_dst_q_info_and_bias(lhs_q_info, rhs_q_info, m, n, k, data_type, bias_fraction);
+ dst_q_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+ }
+
+ if(pretranspose_a)
+ {
+ permute(shape_a, PermutationVector(1U, 0U));
+ }
+
+ if(pretranspose_b)
+ {
+ permute(shape_b, PermutationVector(1U, 0U));
+ }
+
+ // Skip configurations unsupported by the device.
+ _device_supports_export_to_cl_image = image2d_from_buffer_supported(CLKernelLibrary::get().get_device());
+ if(!_device_supports_export_to_cl_image && export_rhs_to_cl_image)
+ {
+ ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ return; // Note: Also need to skip the validate in corresponding FIXTURE_DATA_TEST_CASEs.
+ }
+
+ _device_supports_mmul = arm_matrix_multiply_supported(CLKernelLibrary::get().get_device());
+ if(!_device_supports_mmul && use_mmul)
+ {
+ ARM_COMPUTE_TEST_INFO("cl_arm_matrix_multiply not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ return; // Note: Also need to skip the validate in corresponding FIXTURE_DATA_TEST_CASEs.
+ }
+
+ _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type, lhs_q_info, rhs_q_info, dst_q_info);
+ _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type, lhs_q_info, rhs_q_info, dst_q_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(lo), float(hi) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(lo, hi);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ template <typename U>
+ void fill_bias_s32(U &&tensor, int i, int32_t min, int32_t max)
+ {
+ std::uniform_int_distribution<int32_t> distribution(min, max);
+ library->fill(tensor, distribution, i);
+ }
+
+ template <typename U, typename D>
+ void fill_constant(U &&tensor, D value)
+ {
+ library->fill_tensor_value(tensor, value);
+ }
+
+ CLTensor compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0,
+ bool export_rhs_to_cl_image, DataType data_type, const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info)
+ {
+ CLSynthetizeOperator<KernelType> matMul{};
+ MatMulKernelInfo matmul_info;
+ matmul_info.adj_lhs = pretranspose_a;
+ matmul_info.adj_rhs = pretranspose_b;
+ matmul_info.m0 = M0;
+ matmul_info.n0 = N0;
+ matmul_info.k0 = K0;
+ matmul_info.export_rhs_to_cl_image = export_rhs_to_cl_image;
+
+ bool is_quantized = is_data_type_quantized(data_type);
+
+ // Create tensors
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, lhs_q_info);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, rhs_q_info);
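+        // For quantized data types the bias is kept in S32 to match the kernel's int32 accumulator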
+ CLTensor bias = create_tensor<CLTensor>(output_shape[0], (is_quantized) ? DataType::S32 : data_type, 1, dst_q_info);
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, dst_q_info);
+
+ matMul.configure(a.info(), b.info(), (_enable_bias) ? bias.info() : nullptr, dst.info(), matmul_info);
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(CLAccessor(a), _hash + 1);
+ fill(CLAccessor(b), _hash + 2);
+
+ // Compute matMul kernel
+ ITensorPack tensors_pack({ { ACL_SRC_0, &a },
+ { ACL_SRC_1, &b },
+ { ACL_DST, &dst }
+ });
+
+ if(_enable_bias)
+ {
+ // Allocate, fill and add bias to TensorPack obj
+ bias.allocator()->allocate();
+ if(is_quantized)
+ {
+ fill_bias_s32(CLAccessor(bias), _hash + 3, _min_bias, _max_bias);
+ }
+ else
+ {
+ fill(CLAccessor(bias), _hash + 3);
+ }
+ tensors_pack.add_tensor(ACL_SRC_2, &bias);
+ }
+
+ matMul.run(tensors_pack);
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type,
+ const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info)
+ {
+        // We collapse dimensions greater than 3 onto dimension 3, i.e. 5D+ tensors will look like 4D
+        // This is necessary unless we choose to extend the gemm reference to 5D+ tensors
+ TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ);
+ TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ);
+ TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimZ);
+
+ // Create reference
+ SimpleTensor<T> a{ shape_a_collapsed, data_type, 1, lhs_q_info };
+ SimpleTensor<T> b{ shape_b_collapsed, data_type, 1, rhs_q_info };
+ SimpleTensor<T> c{ output_shape_collapsed, data_type, 1, dst_q_info };
+
+ // Fill reference
+ fill(a, _hash + 1);
+ fill(b, _hash + 2);
+
+        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K) and B = (B x K x N): if pretranspose_a is true,
+           A is assumed to be (B x K x M), so A must be pre-transposed before being passed to the fixture. We then transpose
+           A again inside the fixture to restore (B x M x K), so that the reference implementation, which expects a
+           (B x M x K) input, can be called. Similarly, if pretranspose_b is true, B is assumed to be (B x N x K) and must
+           be pre-transposed before being passed to the fixture. */
+
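+        /* Illustrative example (2D shapes, batches omitted): with M = 2, K = 3 and pretranspose_a == true,
+           A arrives here already transposed as a (K x M) = (3 x 2) matrix; reference::permute with
+           PermutationVector(1U, 0U) swaps the two innermost dimensions back to (M x K) = (2 x 3),
+           which is the layout gemm_reference expects. */
+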
+ // Define transposed shapes
+ TensorShape a_transposed_shape(a.shape());
+ a_transposed_shape.set(0, a.shape().y());
+ a_transposed_shape.set(1, a.shape().x());
+
+ TensorShape b_transposed_shape(b.shape());
+ b_transposed_shape.set(0, b.shape().y());
+ b_transposed_shape.set(1, b.shape().x());
+
+ // Define transposed tensors
+ SimpleTensor<T> a_transposed{ a_transposed_shape, data_type };
+ SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };
+
+        // Pretranspose A if necessary
+ if(pretranspose_a)
+ {
+ a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
+ }
+
+        // Pretranspose B if necessary
+ if(pretranspose_b)
+ {
+ b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
+ }
+
+        // Use the transposed tensors when pretransposition is enabled; otherwise use the original tensors
+ SimpleTensor<T> result = gemm_reference<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c);
+
+ // We reshape the gemm output back if the tensor is high dimensional
+ if(output_shape_collapsed != output_shape)
+ {
+ result = reference::reshape_layer(result, output_shape);
+ }
+
+ return result;
+ }
+
+ template <typename U = T>
+ typename std::enable_if < std::is_same<U, float>::value || std::is_same<U, half>::value, SimpleTensor<U >>::type gemm_reference(SimpleTensor<U> &a, SimpleTensor<U> &b, SimpleTensor<U> &c)
+ {
+ // Fill bias, then copy first dimension into subsequent dimensions to mimic broadcast
+ // of bias tensor from shape [dst.dimension(0)] to [dst.tensor_shape()] in target kernel
+ if(_enable_bias)
+ {
+ fill(c, _hash + 3);
+ const int n = c.shape().x();
+ const int other_dims = c.shape().collapsed_from(1)[1];
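+            // collapsed_from(1) folds every dimension above the first into dimension 1,
+            // so other_dims is the number of length-n rows that must receive the bias values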
+ for(int i = 1; i < other_dims; ++i) // For all data, copy first n elements into remaining batches
+ {
+ memcpy(c.data() + i * n, c.data(), n * sizeof(T));
+ }
+ }
+        // Setting beta to 0 effectively removes C from the reference
+        // computation: dst = alpha * A * B + 0 * C
+ return reference::gemm<U>(a, b, c, 1.0f, (_enable_bias) ? 1.0f : 0.f);
+ }
+
+ template <typename U = T>
+ typename std::enable_if < std::is_same<U, int8_t>::value || std::is_same<U, uint8_t>::value, SimpleTensor<U >>::type gemm_reference(SimpleTensor<U> &a, SimpleTensor<U> &b, SimpleTensor<U> &c)
+ {
+ const UniformQuantizationInfo aq = a.quantization_info().uniform();
+ const UniformQuantizationInfo bq = b.quantization_info().uniform();
+ const UniformQuantizationInfo cq = c.quantization_info().uniform();
+
+ const SimpleTensor<int32_t> result = reference::gemmlowp_matrix_multiply_core<int32_t, U, U>(a, b, c.shape(), -aq.offset, -bq.offset);
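+        // The reference core is assumed to add the supplied offsets to each operand before
+        // accumulating, so negating the zero points yields sums over (a - aq.offset) * (b - bq.offset)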
+
+ std::vector<int32_t> gemmlowp_multipliers{ 1 };
+ std::vector<int32_t> gemmlowp_shifts{ 1 };
+ const int gemmlowp_offset = cq.offset;
+ const float scale = aq.scale * bq.scale / cq.scale;
+
+ quantization::calculate_quantized_multiplier(scale, &gemmlowp_multipliers[0], &gemmlowp_shifts[0]);
+ constexpr int32_t gemmlowp_min_bound = std::numeric_limits<int32_t>::min();
+ constexpr int32_t gemmlowp_max_bound = std::numeric_limits<int32_t>::max();
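+
+        // Requantization sketch (assumed semantics of calculate_quantized_multiplier): the float
+        // 'scale' is decomposed into a Q0.31 fixed-point multiplier m and a shift s such that
+        // scale ~= (m / 2^31) * 2^-s; the fixed-point reference below then maps each int32
+        // accumulator acc to saturate<U>(round(acc * scale) + gemmlowp_offset)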
+
+ SimpleTensor<int> bias{ c.shape(), DataType::S32 };
+ if(_enable_bias)
+ {
+            // As in the float implementation: fill the first dimension of the bias, then copy its values into the remaining batches
+ fill_bias_s32(bias, _hash + 3, _min_bias, _max_bias);
+ const int n = bias.shape().x();
+ const int other_dims = bias.shape().collapsed_from(1)[1];
+ const unsigned int dt_size = sizeof(int32_t);
+ for(int i = 1; i < other_dims; ++i)
+ {
+ memcpy(bias.data() + i * n, bias.data(), n * dt_size);
+ }
+ }
+ else
+ {
+ fill_constant(bias, static_cast<int32_t>(0)); // effectively disable bias
+ }
+
+ const SimpleTensor<U> final_result = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, U>(result, bias,
+ gemmlowp_multipliers, gemmlowp_shifts, gemmlowp_offset, gemmlowp_min_bound, gemmlowp_max_bound);
+
+ return final_result;
+ }
+
+ CLTensor _target{};
+ SimpleTensor<T> _reference{};
+ bool _enable_bias{ false };
+ bool _device_supports_export_to_cl_image{ true };
+ bool _device_supports_mmul{ true };
+ int32_t _min_bias{ 0 };
+ int32_t _max_bias{ 0 };
+ int32_t _hash{ 0 };
+};
+
+template <typename T, typename KernelType, bool use_mmul = false>
+class MatMulKernelValidationFixture : public MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type)
+ {
+ MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>::setup(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type,
+ false /* enable bias */);
+ }
+};
+
+template <typename T, typename KernelType, bool use_mmul = false>
+class MatMulKernelWithBiasValidation : public MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>
+{
+public:
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type)
+ {
+ MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>::setup(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type,
+ true /* enable bias */);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE_H
diff --git a/tests/validation/fixtures/MaxUnpoolingLayerFixture.h b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
new file mode 100644
index 0000000000..808e3ffabd
--- /dev/null
+++ b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_MAX_UNPOOLING_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_MAX_UNPOOLING_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/MaxUnpoolingLayer.h"
+#include "tests/validation/reference/PoolingLayer.h"
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename PoolingFunctionType, typename MaxUnpoolingFunctionType, typename T>
+class MaxUnpoolingLayerValidationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout)
+ {
+ std::mt19937 gen(library->seed());
+ std::uniform_int_distribution<> offset_dis(0, 20);
+ const float scale = data_type == DataType::QASYMM8_SIGNED ? 1.f / 127.f : 1.f / 255.f;
+        const int offset_in = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
+        const int offset_out = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
+        const QuantizationInfo input_qinfo(scale, offset_in);
+        const QuantizationInfo output_qinfo(scale, offset_out);
+ _pool_info = pool_info;
+ _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo);
+ _reference = compute_reference(shape, pool_info, data_type, input_qinfo, output_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if(tensor.data_type() == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, 0);
+ }
+        else // Data type is quantized asymmetric
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape, PoolingLayerInfo pool_info,
+ DataType data_type, DataLayout data_layout,
+ QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
+ {
+ // Change shape in case of NHWC.
+ if(data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, input_qinfo, data_layout);
+ const TensorShape dst_shape = misc::shape_calculator::compute_pool_shape(*(src.info()), pool_info);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout);
+ TensorType unpooled = create_tensor<TensorType>(input_shape, data_type, 1, output_qinfo, data_layout);
+ TensorType indices = create_tensor<TensorType>(dst_shape, DataType::U32, 1, output_qinfo, data_layout);
+
+ // Create and configure function
+ PoolingFunctionType pool_layer;
+ pool_layer.configure(&src, &dst, pool_info, &indices);
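+        // 'indices' records, per pooling window, the position of the max element in the input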
+
+        // Create and configure the unpooling function
+ MaxUnpoolingFunctionType unpool_layer;
+ unpool_layer.configure(&dst, &indices, &unpooled, pool_info);
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(indices.info()->is_resizable());
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+ indices.allocator()->allocate();
+ unpooled.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!indices.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!unpooled.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ pool_layer.run();
+ unpool_layer.run();
+ return unpooled;
+ }
+
+ SimpleTensor<T> compute_reference(TensorShape input_shape, PoolingLayerInfo info, DataType data_type,
+ QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
+ {
+ SimpleTensor<T> src(input_shape, data_type, 1, input_qinfo);
+ SimpleTensor<uint32_t> indices{};
+ // Fill reference
+ fill(src);
+ auto pooled_tensor = reference::pooling_layer<T>(src, info, output_qinfo, &indices);
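+        // The reference unpooling scatters each pooled value back to the position recorded in
+        // 'indices', zero-filling the rest of a tensor with the original input_shape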
+ return reference::max_unpooling_layer<T>(pooled_tensor, info, output_qinfo, indices, input_shape);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ PoolingLayerInfo _pool_info{};
+};
+
+template <typename TensorType, typename AccessorType, typename F1, typename F2, typename T>
+class MaxUnpoolingLayerValidationFixture : public MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>
+{
+public:
+ void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, DataType data_type, DataLayout data_layout)
+ {
+ MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, true),
+ data_type, data_layout);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_MAX_UNPOOLING_LAYER_FIXTURE */
diff --git a/tests/validation/fixtures/MeanStdDevFixture.h b/tests/validation/fixtures/MeanStdDevFixture.h
deleted file mode 100644
index 58d4644069..0000000000
--- a/tests/validation/fixtures/MeanStdDevFixture.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_MEAN_STD_DEV_FIXTURE
-#define ARM_COMPUTE_TEST_MEAN_STD_DEV_FIXTURE
-
-#include "tests/Globals.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/MeanStdDev.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class MeanStdDevValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type)
- {
- _target = compute_target(shape, data_type);
- _reference = compute_reference(shape, data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- if(is_data_type_float(tensor.data_type()))
- {
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
- library->fill(tensor, distribution, 0);
- }
- else
- {
- library->fill_tensor_uniform(tensor, 0);
- }
- }
-
- std::pair<float, float> compute_target(const TensorShape &shape, DataType data_type)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
-
- // Create output variables
- float mean = 0.0f;
- float std_dev = 0.0f;
-
- // Create and configure function
- FunctionType mean_std_dev;
- mean_std_dev.configure(&src, &mean, &std_dev);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- mean_std_dev.run();
-
- return std::make_pair(mean, std_dev);
- }
-
- std::pair<float, float> compute_reference(const TensorShape &shape, DataType data_type)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::mean_and_standard_deviation<T>(src);
- }
-
- std::pair<float, float> _target{};
- std::pair<float, float> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MEAN_STD_DEV_FIXTURE */
diff --git a/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h b/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h
index 1c48b74baf..bf5d20790c 100644
--- a/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,29 +44,35 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class MeanStdDevNormalizationLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape, DataType dt, bool in_place, float epsilon = 1e-8f)
+    void setup(TensorShape shape, DataType dt, bool in_place, float epsilon = 1e-8f)
{
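+        // Fixed, arbitrary quantization parameters; these are only meaningful for quantized data types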
- _data_type = dt;
- _target = compute_target(shape, dt, in_place, epsilon);
- _reference = compute_reference(shape, dt, epsilon);
+ QuantizationInfo qi = QuantizationInfo(0.5f, 10);
+ _data_type = dt;
+ _target = compute_target(shape, dt, in_place, epsilon, qi);
+ _reference = compute_reference(shape, dt, epsilon, qi);
}
protected:
template <typename U>
- void fill(U &&src_tensor)
+ void fill(U &&tensor)
{
- const float min_bound = -1.f;
- const float max_bound = 1.f;
- std::uniform_real_distribution<> distribution(min_bound, max_bound);
- library->fill(src_tensor, distribution, 0);
+ if(is_data_type_float(_data_type))
+ {
+ std::uniform_real_distribution<> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, 0);
+ }
+ else
+ {
+ std::uniform_int_distribution<> distribution{ 0, 255 };
+ library->fill(tensor, distribution, 0);
+ }
}
- TensorType compute_target(TensorShape shape, DataType dt, bool in_place, float epsilon)
+ TensorType compute_target(TensorShape shape, DataType dt, bool in_place, float epsilon, QuantizationInfo qi)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, dt, 1);
- TensorType dst;
+ TensorType src = create_tensor<TensorType>(shape, dt, 1, qi);
+ TensorType dst = create_tensor<TensorType>(shape, dt, 1, qi);
TensorType *dst_ptr = in_place ? &src : &dst;
@@ -74,17 +80,17 @@ protected:
FunctionType norm;
norm.configure(&src, dst_ptr, epsilon);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
if(!in_place)
{
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
}
// Fill tensors
@@ -103,10 +109,10 @@ protected:
}
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType dt, float epsilon)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType dt, float epsilon, QuantizationInfo qi)
{
// Create reference
- SimpleTensor<T> ref_src{ shape, dt, 1 };
+ SimpleTensor<T> ref_src{ shape, dt, 1, qi };
// Fill reference
fill(ref_src);
@@ -118,6 +124,7 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/fixtures/Median3x3Fixture.h b/tests/validation/fixtures/Median3x3Fixture.h
deleted file mode 100644
index 094635804b..0000000000
--- a/tests/validation/fixtures/Median3x3Fixture.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_MEDIAN3X3_FIXTURE
-#define ARM_COMPUTE_TEST_MEDIAN3X3_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Median3x3.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class Median3x3ValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const uint8_t constant_border_value = distribution(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType median3x3;
- median3x3.configure(&src, &dst, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- median3x3.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, BorderMode border_mode, uint8_t constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::median3x3<T>(src, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MEDIAN3X3_FIXTURE */
diff --git a/tests/validation/fixtures/MinMaxLocationFixture.h b/tests/validation/fixtures/MinMaxLocationFixture.h
deleted file mode 100644
index 120a5c49c4..0000000000
--- a/tests/validation/fixtures/MinMaxLocationFixture.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_MIN_MAX_LOCATION_FIXTURE
-#define ARM_COMPUTE_TEST_MIN_MAX_LOCATION_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/Types.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/MinMaxLocation.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename ArrayType, typename ArrayAccessorType, typename FunctionType, typename T>
-class MinMaxLocationValidationFixture : public framework::Fixture
-{
-public:
- using target_type = typename std::conditional<std::is_integral<T>::value, int32_t, float>::type;
-
- template <typename...>
- void setup(TensorShape shape, DataType data_type)
- {
- _target = compute_target(shape, data_type);
- _reference = compute_reference(shape, data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- MinMaxLocationValues<target_type> compute_target(const TensorShape &shape, DataType data_type)
- {
- MinMaxLocationValues<target_type> target;
-
- ArrayType min_loc(shape.total_size());
- ArrayType max_loc(shape.total_size());
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType min_max_loc;
- min_max_loc.configure(&src, &target.min, &target.max, &min_loc, &max_loc);
-
- // Allocate tensors
- src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- min_max_loc.run();
-
- // Create accessor objects for mapping operations
- ArrayAccessorType min_loc_accessor(min_loc);
- ArrayAccessorType max_loc_accessor(max_loc);
-
- // Move min Coordinates2D values from ArrayType to vector
- for(size_t i = 0; i < min_loc.num_values(); ++i)
- {
- target.min_loc.push_back(std::move(min_loc_accessor.at(i)));
- }
-
- // Move max Coordinates2D values from ArrayType to vector
- for(size_t i = 0; i < max_loc.num_values(); ++i)
- {
- target.max_loc.push_back(std::move(max_loc_accessor.at(i)));
- }
-
- return target;
- }
-
- MinMaxLocationValues<T> compute_reference(const TensorShape &shape, DataType data_type)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- return reference::min_max_location<T>(src);
- }
-
- MinMaxLocationValues<target_type> _target{};
- MinMaxLocationValues<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MIN_MAX_LOCATION_FIXTURE */
diff --git a/tests/validation/fixtures/NonLinearFilterFixture.h b/tests/validation/fixtures/NonLinearFilterFixture.h
deleted file mode 100644
index 78ba0eaf86..0000000000
--- a/tests/validation/fixtures/NonLinearFilterFixture.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_NONLINEAR_FILTER_FIXTURE
-#define ARM_COMPUTE_TEST_NONLINEAR_FILTER_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/NonLinearFilter.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class NonLinearFilterValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, NonLinearFilterFunction function, unsigned int mask_size, MatrixPattern pattern, BorderMode border_mode, DataType data_type)
- {
- std::mt19937 generator(library->seed());
- std::uniform_int_distribution<uint8_t> distribution_u8(0, 255);
- const uint8_t constant_border_value = distribution_u8(generator);
-
- // Create the mask
- std::vector<uint8_t> mask(mask_size * mask_size);
- fill_mask_from_pattern(mask.data(), mask_size, mask_size, pattern);
-
- _border_size = BorderSize(static_cast<int>(mask_size / 2));
- _target = compute_target(shape, data_type, function, mask_size, pattern, mask.data(), border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, function, mask_size, pattern, mask.data(), border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, NonLinearFilterFunction function, unsigned int mask_size, MatrixPattern pattern, const uint8_t *mask, BorderMode border_mode,
- uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType non_linear_filter;
- non_linear_filter.configure(&src, &dst, function, mask_size, pattern, mask, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- non_linear_filter.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, NonLinearFilterFunction function, unsigned int mask_size, MatrixPattern pattern, const uint8_t *mask,
- BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- return reference::non_linear_filter<T>(src, function, mask_size, pattern, mask, border_mode, constant_border_value);
- }
-
- BorderMode _border_mode{};
- BorderSize _border_size{};
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_NONLINEAR_FILTER_FIXTURE */
diff --git a/tests/validation/fixtures/NonMaxSuppressionFixture.h b/tests/validation/fixtures/NonMaxSuppressionFixture.h
index 9299ed62a4..043b4731aa 100644
--- a/tests/validation/fixtures/NonMaxSuppressionFixture.h
+++ b/tests/validation/fixtures/NonMaxSuppressionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType>
class NMSValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int max_output_size, float score_threshold, float nms_threshold)
{
ARM_COMPUTE_ERROR_ON(max_output_size == 0);
@@ -59,9 +58,9 @@ public:
protected:
template <typename U>
- void fill(U &&tensor, int i, int lo, int hi)
+ void fill(U &&tensor, int i, float lo, float hi)
{
- std::uniform_real_distribution<> distribution(lo, hi);
+ std::uniform_real_distribution<float> distribution(lo, hi);
library->fill_boxes(tensor, distribution, i);
}
@@ -77,18 +76,18 @@ protected:
FunctionType nms_func;
nms_func.configure(&bboxes, &scores, &indices, max_output_size, score_threshold, nms_threshold);
- ARM_COMPUTE_EXPECT(bboxes.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(indices.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(scores.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bboxes.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(indices.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(scores.info()->is_resizable());
// Allocate tensors
bboxes.allocator()->allocate();
indices.allocator()->allocate();
scores.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!bboxes.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!indices.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!scores.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bboxes.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!indices.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!scores.info()->is_resizable());
// Fill tensors
fill(AccessorType(bboxes), 0, 0.f, 1.f);
diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h
index 4d6ef7019f..ddaa3533f5 100644
--- a/tests/validation/fixtures/NormalizationLayerFixture.h
+++ b/tests/validation/fixtures/NormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class NormalizationValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, DataLayout data_layout)
{
NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled);
@@ -59,7 +58,10 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
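+        // std::uniform_real_distribution is not defined for half, so a 16-bit-aware helper
+        // distribution is selected for FP16 types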
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, 0);
}
@@ -78,15 +80,15 @@ protected:
FunctionType norm_layer;
norm_layer.configure(&src, &dst, info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -116,7 +118,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class NormalizationValidationFixture : public NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, DataLayout data_layout)
{
NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, data_layout);
diff --git a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
index b46bd3c407..5f2c865950 100644
--- a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
+++ b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class NormalizePlanarYUVLayerValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape0, TensorShape shape1, DataType dt, DataLayout data_layout, QuantizationInfo quantization_info)
{
_data_type = dt;
@@ -56,12 +55,14 @@ protected:
template <typename U>
void fill(U &&src_tensor, U &&mean_tensor, U &&std_tensor)
{
+ using FloatDistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<float>>::type;
+
if(is_data_type_float(_data_type))
{
- const float min_bound = -1.f;
- const float max_bound = 1.f;
- std::uniform_real_distribution<> distribution(min_bound, max_bound);
- std::uniform_real_distribution<> distribution_std(0.1, max_bound);
+ const T min_bound = T(-1.f);
+ const T max_bound = T(1.f);
+ FloatDistributionType distribution(min_bound, max_bound);
+ FloatDistributionType distribution_std(T(0.1f), max_bound);
library->fill(src_tensor, distribution, 0);
library->fill(mean_tensor, distribution, 1);
library->fill(std_tensor, distribution_std, 2);
@@ -95,10 +96,10 @@ protected:
FunctionType norm;
norm.configure(&src, &dst, &mean, &std);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(std.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(std.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -106,10 +107,10 @@ protected:
mean.allocator()->allocate();
std.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!std.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!std.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), AccessorType(mean), AccessorType(std));
@@ -142,7 +143,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class NormalizePlanarYUVLayerValidationFixture : public NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape0, TensorShape shape1, DataType dt, DataLayout data_layout)
{
NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, dt, data_layout, QuantizationInfo());
@@ -153,7 +153,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class NormalizePlanarYUVLayerValidationQuantizedFixture : public NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape0, TensorShape shape1, DataType dt, DataLayout data_layout, QuantizationInfo quantization_info)
{
NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, dt, data_layout, quantization_info);
diff --git a/tests/validation/fixtures/OpticalFlowFixture.h b/tests/validation/fixtures/OpticalFlowFixture.h
deleted file mode 100644
index f8f2021533..0000000000
--- a/tests/validation/fixtures/OpticalFlowFixture.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_OPTICAL_FLOW
-#define ARM_COMPUTE_TEST_OPTICAL_FLOW
-
-#include "arm_compute/core/PyramidInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/Types.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/OpticalFlow.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType,
- typename AccessorType,
- typename ArrayType,
- typename ArrayAccessorType,
- typename FunctionType,
- typename PyramidType,
- typename PyramidFunctionType,
- typename T>
-
-class OpticalFlowValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(std::string old_image_name, std::string new_image_name, OpticalFlowParameters params,
- size_t num_levels, size_t num_keypoints, Format format, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> int_dist(0, 255);
- const uint8_t constant_border_value = int_dist(gen);
-
- // Create keypoints
- std::vector<KeyPoint> old_keypoints = generate_random_keypoints(library->get_image_shape(old_image_name), num_keypoints, library->seed(), num_levels);
- std::vector<KeyPoint> new_keypoints_estimates = old_keypoints;
-
- _target = compute_target(old_image_name, new_image_name, params, num_levels, old_keypoints, new_keypoints_estimates, format, border_mode, constant_border_value);
- _reference = compute_reference(old_image_name, new_image_name, params, num_levels, old_keypoints, new_keypoints_estimates, format, border_mode, constant_border_value);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor, const std::string image, Format format)
- {
- library->fill(tensor, image, format);
- }
-
- ArrayType compute_target(std::string old_image_name, std::string new_image_name, OpticalFlowParameters params, size_t num_levels,
- std::vector<KeyPoint> &old_keypoints, std::vector<KeyPoint> &new_keypoints_estimates,
- Format format, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Get image shapes
- TensorShape old_shape = library->get_image_shape(old_image_name);
- TensorShape new_shape = library->get_image_shape(new_image_name);
-
- // Create tensors
- auto old_image = create_tensor<TensorType>(old_shape, format);
- auto new_image = create_tensor<TensorType>(new_shape, format);
-
- // Load keypoints
- ArrayType old_points(old_keypoints.size());
- ArrayType new_points_estimates(new_keypoints_estimates.size());
- ArrayType new_points(old_keypoints.size());
-
- fill_array(ArrayAccessorType(old_points), old_keypoints);
- fill_array(ArrayAccessorType(new_points_estimates), new_keypoints_estimates);
-
- // Create pyramid images
- PyramidInfo pyramid_info(num_levels, SCALE_PYRAMID_HALF, old_image.info()->tensor_shape(), format);
- PyramidType old_pyramid = create_pyramid<PyramidType>(pyramid_info);
- PyramidType new_pyramid = create_pyramid<PyramidType>(pyramid_info);
-
- // Create and configure pyramid functions
- PyramidFunctionType old_gp;
- old_gp.configure(&old_image, &old_pyramid, border_mode, constant_border_value);
-
- PyramidFunctionType new_gp;
- new_gp.configure(&new_image, &new_pyramid, border_mode, constant_border_value);
-
- for(size_t i = 0; i < pyramid_info.num_levels(); ++i)
- {
- ARM_COMPUTE_EXPECT(old_pyramid.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(new_pyramid.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Create and configure optical flow function
- FunctionType optical_flow;
-
- optical_flow.configure(&old_pyramid,
- &new_pyramid,
- &old_points,
- &new_points_estimates,
- &new_points,
- params.termination,
- params.epsilon,
- params.num_iterations,
- params.window_dimension,
- params.use_initial_estimate,
- border_mode,
- constant_border_value);
-
- ARM_COMPUTE_EXPECT(old_image.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(new_image.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate input tensors
- old_image.allocator()->allocate();
- new_image.allocator()->allocate();
-
- // Allocate pyramids
- old_pyramid.allocate();
- new_pyramid.allocate();
-
- ARM_COMPUTE_EXPECT(!old_image.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!new_image.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- for(size_t i = 0; i < pyramid_info.num_levels(); ++i)
- {
- ARM_COMPUTE_EXPECT(!old_pyramid.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!new_pyramid.get_pyramid_level(i)->info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Fill tensors
- fill(AccessorType(old_image), old_image_name, format);
- fill(AccessorType(new_image), new_image_name, format);
-
- // Compute functions
- old_gp.run();
- new_gp.run();
- optical_flow.run();
-
- return new_points;
- }
-
- std::vector<KeyPoint> compute_reference(std::string old_image_name, std::string new_image_name,
- OpticalFlowParameters params, size_t num_levels,
- std::vector<KeyPoint> &old_keypoints, std::vector<KeyPoint> &new_keypoints_estimates,
- Format format, BorderMode border_mode, uint8_t constant_border_value)
- {
- SimpleTensor<T> old_image{ library->get_image_shape(old_image_name), data_type_from_format(format) };
- SimpleTensor<T> new_image{ library->get_image_shape(new_image_name), data_type_from_format(format) };
-
- fill(old_image, old_image_name, format);
- fill(new_image, new_image_name, format);
-
- return reference::optical_flow<T>(old_image, new_image, params, num_levels, old_keypoints, new_keypoints_estimates,
- border_mode, constant_border_value);
- }
-
- ArrayType _target{};
- std::vector<KeyPoint> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_OPTICAL_FLOW */
diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h
index 58ca81f0df..93b43616ff 100644
--- a/tests/validation/fixtures/PadLayerFixture.h
+++ b/tests/validation/fixtures/PadLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PaddingFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, const PaddingList &padding, const PaddingMode mode)
{
PaddingList clamped_padding = padding;
@@ -94,15 +93,15 @@ protected:
FunctionType padding;
padding.configure(&src, &dst, paddings, const_value, mode);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
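
The recurring ARM_COMPUTE_EXPECT(...) to ARM_COMPUTE_ASSERT(...) conversion in these hunks swaps a record-and-continue check for one that stops the test on failure. A simplified sketch of that behavioural difference (toy macros, not the framework's real implementations):

#include <cstdio>
#include <cstdlib>

// Simplified stand-ins: the real macros integrate with the test framework's logger.
#define SKETCH_EXPECT(cond) \
    do { if(!(cond)) { std::printf("FAILED: %s (continuing)\n", #cond); } } while(0)
#define SKETCH_ASSERT(cond) \
    do { if(!(cond)) { std::printf("FAILED: %s (aborting test)\n", #cond); std::exit(1); } } while(0)

int main()
{
    bool resizable = true;
    SKETCH_EXPECT(resizable); // a failure would be recorded, execution continues
    SKETCH_ASSERT(resizable); // a failure would end the test immediately
    return 0;
}
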
diff --git a/tests/validation/fixtures/PermuteFixture.h b/tests/validation/fixtures/PermuteFixture.h
index 76351734d5..b1b3845a8d 100644
--- a/tests/validation/fixtures/PermuteFixture.h
+++ b/tests/validation/fixtures/PermuteFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PermuteValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, PermutationVector perm, DataType data_type)
{
_target = compute_target(input_shape, data_type, perm);
@@ -73,15 +72,15 @@ protected:
FunctionType perm_func;
perm_func.configure(&src, &dst, perm);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/PhaseFixture.h b/tests/validation/fixtures/PhaseFixture.h
deleted file mode 100644
index 09badcfd2a..0000000000
--- a/tests/validation/fixtures/PhaseFixture.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_PHASE_FIXTURE
-#define ARM_COMPUTE_TEST_PHASE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Phase.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class PhaseValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, Format format, PhaseType phase_type)
- {
- _target = compute_target(shape, format, phase_type);
- _reference = compute_reference(shape, format, phase_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, std::random_device::result_type seed_offset)
- {
- library->fill_tensor_uniform(tensor, seed_offset);
- }
-
- TensorType compute_target(const TensorShape &shape, Format format, PhaseType phase_type)
- {
- DataType data_type = data_type_from_format(format);
-
- // Create tensors
- TensorType src1 = create_tensor<TensorType>(shape, data_type);
- src1.info()->set_format(format);
-
- TensorType src2 = create_tensor<TensorType>(shape, data_type);
- src2.info()->set_format(format);
-
- TensorType dst = create_tensor<TensorType>(shape, DataType::U8);
- dst.info()->set_format(Format::U8);
-
- // Create and configure function
- FunctionType phase;
-
- phase.configure(&src1, &src2, &dst, phase_type);
-
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src1.allocator()->allocate();
- src2.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src1), 0);
- fill(AccessorType(src2), 1);
-
- // Compute function
- phase.run();
-
- return dst;
- }
-
- SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, Format format, PhaseType phase_type)
- {
- DataType data_type = data_type_from_format(format);
-
- // Create reference
- SimpleTensor<T> src1{ shape, data_type };
- SimpleTensor<T> src2{ shape, data_type };
-
- // Fill reference
- fill(src1, 0);
- fill(src2, 1);
-
- return reference::phase<T>(src1, src2, phase_type);
- }
-
- TensorType _target{};
- SimpleTensor<uint8_t> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_PHASE_FIXTURE */
diff --git a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
index f561a37a71..4345d8a13f 100644
--- a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
+++ b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PixelWiseMultiplicationGenericValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(const TensorShape &shape0,
const TensorShape &shape1,
DataType dt_in1,
@@ -56,10 +55,12 @@ public:
QuantizationInfo qinfo0,
QuantizationInfo qinfo1,
QuantizationInfo qinfo_out,
- ActivationLayerInfo act_info)
+ ActivationLayerInfo act_info,
+ bool is_inplace)
{
- _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
- _reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
+ _is_inplace = is_inplace;
+ _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
+ _reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
}
protected:
@@ -74,26 +75,49 @@ protected:
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
{
// Create tensors
- TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0);
- TensorType src2 = create_tensor<TensorType>(shape1, dt_in2, 1, qinfo1);
- TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), dt_out, 1, qinfo_out);
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0);
+ TensorType src2 = create_tensor<TensorType>(shape1, dt_in2, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, dt_out, 1, qinfo_out);
+
+ // Check whether to do in-place computation and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if(_is_inplace)
+ {
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (dt_in1 == dt_out);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (dt_in2 == dt_out);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if(src1_is_inplace)
+ {
+ actual_dst = &src1;
+ }
+ else
+ {
+ actual_dst = &src2;
+ }
+ }
+
+ auto allocate_tensor = [](TensorType & t)
+ {
+ ARM_COMPUTE_ASSERT(t.info()->is_resizable());
+ t.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!t.info()->is_resizable());
+ };
// Create and configure function
FunctionType multiply;
- multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy, act_info);
+ multiply.configure(&src1, &src2, actual_dst, scale, convert_policy, rounding_policy, act_info);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ allocate_tensor(src1);
+ allocate_tensor(src2);
- // Allocate tensors
- src1.allocator()->allocate();
- src2.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ // If not computing in place, the original dst still needs to be allocated
+ if(!_is_inplace)
+ {
+ allocate_tensor(dst);
+ }
// Fill tensors
fill(AccessorType(src1), 0);
@@ -102,7 +126,7 @@ protected:
// Compute function
multiply.run();
- return dst;
+ return std::move(*actual_dst);
}
SimpleTensor<T3> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
@@ -123,29 +147,29 @@ protected:
TensorType _target{};
SimpleTensor<T3> _reference{};
+ bool _is_inplace{ false };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class PixelWiseMultiplicationValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
+class PixelWiseMultiplicationValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
+ void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, bool is_inplace)
{
- PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
-class PixelWiseMultiplicationBroadcastValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
+class PixelWiseMultiplicationBroadcastValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
+ bool is_inplace)
{
- PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -153,11 +177,21 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PixelWiseMultiplicationValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
- template <typename...>
- void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class PixelWiseMultiplicationValidationIntegerFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+{
+public:
+ void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace)
+ {
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -165,12 +199,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PixelWiseMultiplicationBroadcastValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
- template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- ActivationLayerInfo act_info)
+ ActivationLayerInfo act_info, bool is_inplace)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -178,12 +211,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PixelWiseMultiplicationValidationQuantizedFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
{
public:
- template <typename...>
void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
- qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
+class PixelWiseMultiplicationBroadcastValidationQuantizedFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
+{
+public:
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
+ {
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
} // namespace validation
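
The in-place path added above aliases the destination to whichever input already matches the broadcast output shape, data type and quantization info. A self-contained sketch of that selection rule (plain structs stand in for ITensorInfo; the field names are assumptions):

#include <cassert>
#include <iostream>
#include <vector>

// Minimal stand-in for the tensor metadata the fixture compares.
struct Meta
{
    std::vector<int> shape;
    int              dtype;
    float            qscale;
};

bool compatible(const Meta &candidate, const Meta &out)
{
    return candidate.shape == out.shape && candidate.dtype == out.dtype && candidate.qscale == out.qscale;
}

// Returns 0 to write into src1, 1 to write into src2, -1 if in-place is impossible.
int pick_inplace_dst(const Meta &src1, const Meta &src2, const Meta &out)
{
    if(compatible(src1, out)) { return 0; }
    if(compatible(src2, out)) { return 1; }
    return -1;
}

int main()
{
    Meta a{ { 4, 4 }, 0, 1.f };
    Meta b{ { 1, 4 }, 0, 1.f }; // broadcast input: its shape differs from the output's
    Meta o{ { 4, 4 }, 0, 1.f };
    assert(pick_inplace_dst(a, b, o) == 0); // only src1 can alias the output
    std::cout << "src" << pick_inplace_dst(a, b, o) + 1 << " is the in-place destination\n";
    return 0;
}
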
diff --git a/tests/validation/fixtures/Pooling3dLayerFixture.h b/tests/validation/fixtures/Pooling3dLayerFixture.h
new file mode 100644
index 0000000000..1bdf615fb1
--- /dev/null
+++ b/tests/validation/fixtures/Pooling3dLayerFixture.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/Pooling3dLayer.h"
+#include <random>
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class Pooling3dLayerValidationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape, Pooling3dLayerInfo pool_info, DataType data_type, QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
+ {
+ _target = compute_target(shape, pool_info, data_type, input_qinfo, output_qinfo);
+ _reference = compute_reference(shape, pool_info, data_type, input_qinfo, output_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if(tensor.data_type() == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, 0);
+ }
+ else // data type is quantized_asymmetric
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+ }
+
+ TensorType compute_target(TensorShape shape, Pooling3dLayerInfo info,
+ DataType data_type, QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, input_qinfo, DataLayout::NDHWC);
+ const TensorShape dst_shape = misc::shape_calculator::compute_pool3d_shape((src.info()->tensor_shape()), info);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, DataLayout::NDHWC);
+
+ // Create and configure function
+ FunctionType pool_layer;
+ pool_layer.validate(src.info(), dst.info(), info);
+ pool_layer.configure(&src, &dst, info);
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ pool_layer.run();
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(TensorShape shape, Pooling3dLayerInfo info, DataType data_type, QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
+ {
+ // Create reference
+ SimpleTensor<T> src(shape, data_type, 1, input_qinfo, DataLayout::NDHWC);
+ // Fill reference
+ fill(src);
+ return reference::pooling_3d_layer<T>(src, info, output_qinfo);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class Pooling3dLayerValidationFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, PoolingType pool_type, Size3D pool_size, Size3D stride, Padding3D padding, bool exclude_padding, DataType data_type)
+ {
+ Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, Pooling3dLayerInfo(pool_type, pool_size, stride, padding, exclude_padding),
+ data_type);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class Pooling3dLayerValidationQuantizedFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, PoolingType pool_type, Size3D pool_size, Size3D stride, Padding3D padding, bool exclude_padding, DataType data_type,
+ QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
+ {
+ Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, Pooling3dLayerInfo(pool_type, pool_size, stride, padding, exclude_padding),
+ data_type, input_qinfo, output_qinfo);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class Pooling3dLayerGlobalValidationFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, PoolingType pool_type, DataType data_type)
+ {
+ Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, Pooling3dLayerInfo(pool_type), data_type);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpecialPooling3dLayerValidationFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape src_shape, Pooling3dLayerInfo pool_info, DataType data_type)
+ {
+ Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE */
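
The new fixture relies on compute_pool3d_shape() to derive the NDHWC destination shape. The usual pooled-extent arithmetic behind such helpers, sketched as a standalone function (floor rounding only; the real helper also covers ceil rounding and dimension validation):

#include <array>
#include <cassert>

// One spatial dimension of a pooled output, floor-rounded.
int pooled_extent(int in, int pool, int stride, int pad_before, int pad_after)
{
    return (in + pad_before + pad_after - pool) / stride + 1;
}

int main()
{
    // A 16x16x16 volume with a 2x2x2 pool, stride 2 and no padding pools to 8x8x8.
    std::array<int, 3> out{};
    for(auto &o : out)
    {
        o = pooled_extent(16, 2, 2, 0, 0);
    }
    assert(out[0] == 8 && out[1] == 8 && out[2] == 8);
    return 0;
}
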
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index eb40cea0c2..59c920868b 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,29 +45,42 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PoolingLayerValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false)
+ void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false,
+ QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false)
{
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<> offset_dis(0, 20);
- const float scale = data_type == DataType::QASYMM8_SIGNED ? 1.f / 127.f : 1.f / 255.f;
- const int scale_in = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
- const int scale_out = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
- const QuantizationInfo input_qinfo(scale, scale_in);
- const QuantizationInfo output_qinfo(scale, scale_out);
-
- _pool_info = pool_info;
- _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
- _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
+ _mixed_layout = mixed_layout;
+ _pool_info = pool_info;
+ _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
+ _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Run the pooling function with the swapped layout
+ layer.run();
+
+ // Restore the original data layout so the test suite can check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor)
{
- if(!is_data_type_quantized(tensor.data_type()))
+ if(tensor.data_type() == DataType::F32)
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
library->fill(tensor, distribution, 0);
}
else // data type is quantized_asymmetric
@@ -86,7 +99,6 @@ protected:
{
permute(shape, PermutationVector(2U, 0U, 1U));
}
-
// Create tensors
TensorType src = create_tensor<TensorType>(shape, data_type, 1, input_qinfo, data_layout);
const TensorShape dst_shape = misc::shape_calculator::compute_pool_shape(*(src.info()), info);
@@ -97,25 +109,33 @@ protected:
FunctionType pool_layer;
pool_layer.configure(&src, &dst, info, (indices) ? &_target_indices : nullptr);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(_target_indices.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target_indices.info()->is_resizable());
+
+ add_padding_x({ &src, &dst, &_target_indices }, data_layout);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
_target_indices.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!_target_indices.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_target_indices.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
- // Compute function
- pool_layer.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(pool_layer, src, dst);
+ }
+ else
+ {
+ // Compute function
+ pool_layer.run();
+ }
return dst;
}
@@ -132,6 +152,7 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
PoolingLayerInfo _pool_info{};
+ bool _mixed_layout{ false };
TensorType _target_indices{};
SimpleTensor<uint32_t> _ref_indices{};
};
@@ -139,23 +160,22 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PoolingLayerIndicesValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout)
+ void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout, bool use_kernel_indices)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding, false,
+ true, use_kernel_indices),
data_type, data_layout, true);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
- data_type, data_layout);
+ data_type, data_layout, false, mixed_layout);
}
};
@@ -163,7 +183,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PoolingLayerValidationMixedPrecisionFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout, bool fp_mixed_precision = false)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding, fp_mixed_precision),
@@ -171,15 +190,15 @@ public:
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout = DataLayout::NCHW)
+ void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout = DataLayout::NCHW,
+ QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
- data_type, data_layout);
+ data_type, data_layout, false, input_qinfo, output_qinfo, mixed_layout);
}
};
@@ -187,10 +206,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SpecialPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape src_shape, PoolingLayerInfo pool_info, DataType data_type)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW);
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, pool_info.data_layout);
}
};
@@ -198,7 +216,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, data_layout), data_type, data_layout);
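
The mix_layout() hook added above flips the tensors' advertised layout between configure() and run(), then restores it for validation. The shape of that toggle pattern, sketched with a plain enum instead of the real ITensorInfo API:

#include <cassert>

enum class Layout { NCHW, NHWC };

struct TensorStub
{
    Layout layout{ Layout::NCHW };
};

struct LayerStub
{
    int runs{ 0 };
    void run() { ++runs; }
};

// Swap the advertised layout around run(), then restore it so validation sees the original.
void mix_layout(LayerStub &layer, TensorStub &src, TensorStub &dst)
{
    const Layout original = src.layout;
    const Layout flipped  = (original == Layout::NCHW) ? Layout::NHWC : Layout::NCHW;
    src.layout = flipped;
    dst.layout = flipped;
    layer.run();
    src.layout = original;
    dst.layout = original;
}

int main()
{
    TensorStub src, dst;
    LayerStub  layer;
    mix_layout(layer, src, dst);
    assert(layer.runs == 1 && src.layout == Layout::NCHW && dst.layout == Layout::NCHW);
    return 0;
}
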
diff --git a/tests/validation/fixtures/PriorBoxLayerFixture.h b/tests/validation/fixtures/PriorBoxLayerFixture.h
index fb15631789..0a76cfd155 100644
--- a/tests/validation/fixtures/PriorBoxLayerFixture.h
+++ b/tests/validation/fixtures/PriorBoxLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class PriorBoxLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, PriorBoxLayerInfo info, DataType data_type, DataLayout data_layout)
{
TensorInfo input_info(input_shape, 1, data_type);
@@ -73,18 +72,18 @@ protected:
FunctionType prior_box;
prior_box.configure(&src1, &src2, &dst, info);
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src1.allocator()->allocate();
src2.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!src2.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Compute function
prior_box.run();
diff --git a/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h b/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h
index cee39c2c82..e864b4affe 100644
--- a/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h
+++ b/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 ARM Limited.
+ * Copyright (c) 2020-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class QLSTMLayerNormalizationValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weight_shape, TensorShape bias_shape, DataType data_type, QuantizationInfo weight_qinfo)
{
ARM_COMPUTE_ERROR_ON(data_type != DataType::QSYMM16);
@@ -91,9 +90,9 @@ protected:
{
for(auto t : tensors)
{
- ARM_COMPUTE_EXPECT(t->info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(t->info()->is_resizable());
t->allocator()->allocate();
- ARM_COMPUTE_EXPECT(!t->info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!t->info()->is_resizable());
}
}
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 085abefffc..1b21967bda 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class QuantizationValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
{
_target = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in);
@@ -70,15 +69,15 @@ protected:
FunctionType quantization_layer;
quantization_layer.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -108,7 +107,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class QuantizationValidationFixture : public QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
{
QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>::setup(shape, data_type_in, data_type_out, qinfo, QuantizationInfo());
diff --git a/tests/validation/fixtures/RNNLayerFixture.h b/tests/validation/fixtures/RNNLayerFixture.h
index 2645116b44..e9a05e7838 100644
--- a/tests/validation/fixtures/RNNLayerFixture.h
+++ b/tests/validation/fixtures/RNNLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class RNNLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape recurrent_weights_shape, TensorShape bias_shape, TensorShape output_shape, ActivationLayerInfo info,
DataType data_type)
{
@@ -54,7 +53,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
@@ -73,12 +75,12 @@ protected:
FunctionType rnn;
rnn.configure(&input, &weights, &recurrent_weights, &bias, &hidden_state, &output, info);
- ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(recurrent_weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(hidden_state.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(recurrent_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(hidden_state.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
// Allocate tensors
input.allocator()->allocate();
@@ -88,12 +90,12 @@ protected:
hidden_state.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!recurrent_weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!hidden_state.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!recurrent_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!hidden_state.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
fill(AccessorType(input), 0);
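
The reworked RNN fill() selects its distribution type at compile time with std::conditional, since std::uniform_real_distribution is not defined for half-width types. The same dispatch sketched standalone, with float standing in for the 16-bit case (the half type and the library's 16-bit distribution are not reproduced here):

#include <iostream>
#include <random>
#include <type_traits>

// Pretend "narrow" types need a dedicated distribution, as half does in the library.
template <typename T>
struct narrow_distribution
{
    narrow_distribution(T lo, T hi) : dist(lo, hi) {}
    template <typename G>
    T operator()(G &g) { return static_cast<T>(dist(g)); }
    std::uniform_real_distribution<float> dist;
};

template <typename T, bool IsNarrow>
using dist_t = typename std::conditional<IsNarrow, narrow_distribution<T>, std::uniform_real_distribution<T>>::type;

int main()
{
    std::mt19937 gen(42);
    dist_t<float, true>   narrow{ -1.0f, 1.0f }; // takes the dedicated path
    dist_t<double, false> wide{ -1.0, 1.0 };     // plain std:: distribution
    std::cout << narrow(gen) << ' ' << wide(gen) << '\n';
    return 0;
}
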
diff --git a/tests/validation/fixtures/ROIAlignLayerFixture.h b/tests/validation/fixtures/ROIAlignLayerFixture.h
index e4470c99a0..ad76dcbbd9 100644
--- a/tests/validation/fixtures/ROIAlignLayerFixture.h
+++ b/tests/validation/fixtures/ROIAlignLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ROIAlignLayerGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
{
_rois_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::QASYMM16 : data_type;
@@ -138,18 +137,18 @@ protected:
FunctionType roi_align_layer;
roi_align_layer.configure(&src, &rois_tensor, &dst, pool_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(rois_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rois_tensor.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
rois_tensor.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!rois_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rois_tensor.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -189,7 +188,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ROIAlignLayerFixture : public ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T, TRois>
{
public:
- template <typename...>
void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout)
{
ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T, TRois>::setup(input_shape, pool_info, rois_shape, data_type, data_layout,
@@ -201,7 +199,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ROIAlignLayerQuantizedFixture : public ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T, TRois>
{
public:
- template <typename...>
void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type,
DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
{
diff --git a/tests/validation/fixtures/ROIPoolingLayerFixture.h b/tests/validation/fixtures/ROIPoolingLayerFixture.h
new file mode 100644
index 0000000000..4b46a6176d
--- /dev/null
+++ b/tests/validation/fixtures/ROIPoolingLayerFixture.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE
+#define ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ROIPoolingLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIPoolingLayerGenericFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
+ {
+ _target = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape, qinfo, output_qinfo);
+ _reference = compute_reference(input_shape, data_type, pool_info, rois_shape, qinfo, output_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+
+ template <typename U>
+ void generate_rois(U &&rois, const TensorShape &shape, const ROIPoolingLayerInfo &pool_info, TensorShape rois_shape, DataLayout data_layout = DataLayout::NCHW)
+ {
+ const size_t values_per_roi = rois_shape.x();
+ const size_t num_rois = rois_shape.y();
+
+ std::mt19937 gen(library->seed());
+ uint16_t *rois_ptr = static_cast<uint16_t *>(rois.data());
+
+ const float pool_width = pool_info.pooled_width();
+ const float pool_height = pool_info.pooled_height();
+ const float roi_scale = pool_info.spatial_scale();
+
+ // Calculate distribution bounds
+ const auto scaled_width = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] / roi_scale) / pool_width);
+ const auto scaled_height = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] / roi_scale) / pool_height);
+ const auto min_width = static_cast<float>(pool_width / roi_scale);
+ const auto min_height = static_cast<float>(pool_height / roi_scale);
+
+ // Create distributions
+ std::uniform_int_distribution<int> dist_batch(0, shape[3] - 1);
+ std::uniform_int_distribution<> dist_x1(0, scaled_width);
+ std::uniform_int_distribution<> dist_y1(0, scaled_height);
+ std::uniform_int_distribution<> dist_w(min_width, std::max(float(min_width), (pool_width - 2) * scaled_width));
+ std::uniform_int_distribution<> dist_h(min_height, std::max(float(min_height), (pool_height - 2) * scaled_height));
+
+ for(unsigned int pw = 0; pw < num_rois; ++pw)
+ {
+ const auto batch_idx = dist_batch(gen);
+ const auto x1 = dist_x1(gen);
+ const auto y1 = dist_y1(gen);
+ const auto x2 = x1 + dist_w(gen);
+ const auto y2 = y1 + dist_h(gen);
+
+ rois_ptr[values_per_roi * pw] = batch_idx;
+ rois_ptr[values_per_roi * pw + 1] = static_cast<uint16_t>(x1);
+ rois_ptr[values_per_roi * pw + 2] = static_cast<uint16_t>(y1);
+ rois_ptr[values_per_roi * pw + 3] = static_cast<uint16_t>(x2);
+ rois_ptr[values_per_roi * pw + 4] = static_cast<uint16_t>(y2);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape,
+ DataType data_type,
+ DataLayout data_layout,
+ const ROIPoolingLayerInfo &pool_info,
+ const TensorShape rois_shape,
+ const QuantizationInfo &qinfo,
+ const QuantizationInfo &output_qinfo)
+ {
+ const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo();
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, qinfo, data_layout);
+ TensorType rois_tensor = create_tensor<TensorType>(rois_shape, _rois_data_type, 1, rois_qinfo);
+
+ // Declare output tensor dst with an empty shape; configure() initialises it
+ const TensorShape dst_shape;
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout);
+
+ // Create and configure function
+ FunctionType roi_pool_layer;
+ roi_pool_layer.configure(&src, &rois_tensor, &dst, pool_info);
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(rois_tensor.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ rois_tensor.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!rois_tensor.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src));
+ generate_rois(AccessorType(rois_tensor), input_shape, pool_info, rois_shape, data_layout);
+
+ // Compute function
+ roi_pool_layer.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape,
+ DataType data_type,
+ const ROIPoolingLayerInfo &pool_info,
+ const TensorShape rois_shape,
+ const QuantizationInfo &qinfo,
+ const QuantizationInfo &output_qinfo)
+ {
+ // Create reference tensor
+ SimpleTensor<T> src{ input_shape, data_type, 1, qinfo };
+ const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo();
+ SimpleTensor<uint16_t> rois_tensor{ rois_shape, _rois_data_type, 1, rois_qinfo };
+
+ // Fill reference tensor
+ fill(src);
+ generate_rois(rois_tensor, input_shape, pool_info, rois_shape);
+
+ return reference::roi_pool_layer(src, rois_tensor, pool_info, output_qinfo);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ const DataType _rois_data_type{ DataType::U16 };
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIPoolingLayerQuantizedFixture : public ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type,
+ DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
+ {
+ ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape,
+ data_type, data_layout, qinfo, output_qinfo);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIPoolingLayerFixture : public ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout)
+ {
+ ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape, data_type, data_layout,
+ QuantizationInfo(), QuantizationInfo());
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE */
\ No newline at end of file
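Editor's note: the ROI buffer written by generate_rois above is a flat array of five uint16 values per ROI — the batch index followed by the (x1, y1, x2, y2) corners. A minimal standalone sketch of that layout (the Roi struct and names here are illustrative, not library types):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative only: mirrors the five-value-per-ROI layout that generate_rois writes.
struct Roi
{
    uint16_t batch_idx, x1, y1, x2, y2;
};

int main()
{
    const std::vector<Roi> rois           = { { 0, 2, 3, 10, 12 }, { 1, 0, 0, 4, 4 } };
    const std::size_t      values_per_roi = 5;
    std::vector<uint16_t>  buffer(values_per_roi * rois.size());

    for(std::size_t i = 0; i < rois.size(); ++i)
    {
        buffer[values_per_roi * i]     = rois[i].batch_idx;
        buffer[values_per_roi * i + 1] = rois[i].x1;
        buffer[values_per_roi * i + 2] = rois[i].y1;
        buffer[values_per_roi * i + 3] = rois[i].x2;
        buffer[values_per_roi * i + 4] = rois[i].y2;
    }

    std::cout << "first ROI x2 = " << buffer[3] << "\n"; // prints 10
}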
diff --git a/tests/validation/fixtures/RangeFixture.h b/tests/validation/fixtures/RangeFixture.h
index 4862069694..166613a318 100644
--- a/tests/validation/fixtures/RangeFixture.h
+++ b/tests/validation/fixtures/RangeFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,7 +55,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class RangeFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(const DataType data_type0, float start, float step, const QuantizationInfo qinfo0 = QuantizationInfo())
{
_target = compute_target(data_type0, qinfo0, start, step);
@@ -65,9 +64,9 @@ public:
protected:
float get_random_end(const DataType output_data_type, const QuantizationInfo qinfo_out, float start, float step)
{
- std::uniform_real_distribution<> distribution(1, 100);
- std::mt19937 gen(library->seed());
- float end = start;
+ std::uniform_real_distribution<float> distribution(1, 100);
+ std::mt19937 gen(library->seed());
+ float end = start;
switch(output_data_type)
{
case DataType::U8:
@@ -113,11 +112,11 @@ protected:
FunctionType range_func;
range_func.configure(&dst, start, end, step);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Compute function
range_func.run();
diff --git a/tests/validation/fixtures/ReduceMeanFixture.h b/tests/validation/fixtures/ReduceMeanFixture.h
index 44bb9fca6a..e61941435c 100644
--- a/tests/validation/fixtures/ReduceMeanFixture.h
+++ b/tests/validation/fixtures/ReduceMeanFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,7 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
@@ -46,50 +47,59 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReduceMeanValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info)
+ void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output)
{
- _target = compute_target(shape, data_type, axis, keep_dims, quantization_info);
- _reference = compute_reference(shape, data_type, axis, keep_dims, quantization_info);
+ _target = compute_target(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
+ _reference = compute_reference(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
}
protected:
template <typename U>
void fill(U &&tensor)
{
- if(!is_data_type_quantized(tensor.data_type()))
+ if(tensor.data_type() == DataType::F32)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, 0);
}
- else
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, 0);
+ }
+ else if(is_data_type_quantized(tensor.data_type()))
{
std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, 0);
}
+ else
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
}
- TensorType compute_target(TensorShape &src_shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info)
+ TensorType compute_target(TensorShape &src_shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(src_shape, data_type, 1, quantization_info);
- TensorType dst;
+ TensorType src = create_tensor<TensorType>(src_shape, data_type, 1, quantization_info_input);
+ TensorShape dst_shape = arm_compute::misc::shape_calculator::calculate_reduce_mean_shape(src.info(), axis, keep_dims);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, quantization_info_output);
// Create and configure function
FunctionType reduction_mean;
reduction_mean.configure(&src, axis, keep_dims, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -100,10 +110,10 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(TensorShape &src_shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info)
+ SimpleTensor<T> compute_reference(TensorShape &src_shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output)
{
// Create reference
- SimpleTensor<T> src{ src_shape, data_type, 1, quantization_info };
+ SimpleTensor<T> src{ src_shape, data_type, 1, quantization_info_input };
// Fill reference
fill(src);
@@ -113,7 +123,13 @@ protected:
{
TensorShape output_shape = i == 0 ? src_shape : out.shape();
output_shape.set(axis[i], 1);
- out = reference::reduction_operation<T, T>(i == 0 ? src : out, output_shape, axis[i], ReductionOperation::MEAN_SUM);
+ bool is_opencl = false;
+
+#ifdef ARM_COMPUTE_OPENCL_ENABLED
+ is_opencl = std::is_same<CLTensor, TensorType>::value; // Round toward zero on OpenCL to match the kernel
+#endif /* ARM_COMPUTE_OPENCL_ENABLED */
+ out = reference::reduction_operation<T, T>(i == 0 ? src : out, output_shape, axis[i], ReductionOperation::MEAN_SUM, data_type, quantization_info_output,
+ is_opencl ? RoundingPolicy::TO_ZERO : RoundingPolicy::TO_NEAREST_UP);
}
if(!keep_dims)
@@ -122,7 +138,7 @@ protected:
std::sort(axis.begin(), axis.begin() + axis.num_dimensions());
for(unsigned int i = 0; i < axis.num_dimensions(); ++i)
{
- output_shape.remove_dimension(axis[i] - i);
+ output_shape.remove_dimension(axis[i] - i, false);
}
out = reference::reshape_layer(out, output_shape);
@@ -138,10 +154,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReduceMeanQuantizedFixture : public ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info = QuantizationInfo())
+ void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output)
{
- ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, quantization_info);
+ ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
}
};
@@ -149,10 +164,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReduceMeanFixture : public ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims)
{
- ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, QuantizationInfo());
+ ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, QuantizationInfo(), QuantizationInfo());
}
};
} // namespace validation
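Editor's note: the reference change above makes the quantized mean match the OpenCL kernel by rounding toward zero, while the other backends round to nearest. A small sketch of the difference between the two policies named in the diff (these helpers are illustrative, not the library's implementation):

#include <cmath>
#include <iostream>

// Illustrative stand-ins for RoundingPolicy::TO_ZERO and TO_NEAREST_UP.
int round_to_zero(float v)       { return static_cast<int>(std::trunc(v)); }
int round_to_nearest_up(float v) { return static_cast<int>(std::round(v)); } // half away from zero

int main()
{
    const float mean = 7.0f / 2.0f; // 3.5
    std::cout << round_to_zero(mean) << "\n";       // 3 -- what the OpenCL path expects
    std::cout << round_to_nearest_up(mean) << "\n"; // 4 -- what the other backends expect
}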
diff --git a/tests/validation/fixtures/ReductionOperationFixture.h b/tests/validation/fixtures/ReductionOperationFixture.h
index a93bf49afd..b44f299486 100644
--- a/tests/validation/fixtures/ReductionOperationFixture.h
+++ b/tests/validation/fixtures/ReductionOperationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReductionOperationValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info, bool keep_dims = false)
{
const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
@@ -61,24 +60,29 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- if(!is_data_type_quantized(tensor.data_type()))
+ if(tensor.data_type() == DataType::F32)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, 0);
}
- else
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, 0);
+ }
+ else if(is_data_type_quantized(tensor.data_type()))
{
if(tensor.data_type() == DataType::QASYMM8)
{
std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, 0);
}
else if(tensor.data_type() == DataType::QASYMM8_SIGNED)
{
std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, 0);
}
@@ -87,6 +91,10 @@ protected:
ARM_COMPUTE_ERROR("Not supported");
}
}
+ else
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
}
TensorType compute_target(const TensorShape &src_shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info)
@@ -99,15 +107,15 @@ protected:
FunctionType reduction_func;
reduction_func.configure(&src, &dst, axis, op, _keep_dims);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -126,7 +134,7 @@ protected:
// Fill reference
fill(src);
- return reference::reduction_operation<T, T>(src, dst_shape, axis, op);
+ return reference::reduction_operation<T, T>(src, dst_shape, axis, op, data_type, quantization_info);
}
TensorType _target{};
@@ -140,7 +148,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReductionOperationQuantizedFixture : public ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info = QuantizationInfo(), bool keep_dims = false)
{
ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, quantization_info, keep_dims);
@@ -151,7 +158,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReductionOperationFixture : public ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, bool keep_dims = false)
{
ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, QuantizationInfo(), keep_dims);
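Editor's note: the switch above from uniform_int_distribution<uint8_t>/<int8_t> to 32-bit types is more than style — the C++ standard only defines std::uniform_int_distribution for the standard short/int/long integer types, so instantiating it with 8-bit types is undefined behaviour. A sketch of the safe pattern, drawing wide values and narrowing on store:

#include <cstdint>
#include <random>
#include <vector>

int main()
{
    std::mt19937 gen(42);

    // uint8_t is not a valid IntType for std::uniform_int_distribution, so draw
    // 32-bit values restricted to the 8-bit range and narrow when storing.
    std::uniform_int_distribution<uint32_t> dist(0, 255);

    std::vector<uint8_t> buffer(16);
    for(auto &v : buffer)
    {
        v = static_cast<uint8_t>(dist(gen));
    }
}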
diff --git a/tests/validation/fixtures/RemapFixture.h b/tests/validation/fixtures/RemapFixture.h
deleted file mode 100644
index 78b30151ac..0000000000
--- a/tests/validation/fixtures/RemapFixture.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_REMAP_FIXTURE
-#define ARM_COMPUTE_TEST_REMAP_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Remap.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class RemapValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, InterpolationPolicy policy, DataType data_type, BorderMode border_mode)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution(0, 255);
- const T constant_border_value = static_cast<T>(distribution(gen));
-
- _target = compute_target(shape, policy, data_type, border_mode, constant_border_value);
- _reference = compute_reference(shape, policy, data_type, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i, float min, float max)
- {
- std::uniform_int_distribution<> distribution((int)min, (int)max);
- library->fill(tensor, distribution, i);
- }
-
- TensorType compute_target(const TensorShape &shape, InterpolationPolicy policy, DataType data_type, BorderMode border_mode, T constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType map_x = create_tensor<TensorType>(shape, DataType::F32);
- TensorType map_y = create_tensor<TensorType>(shape, DataType::F32);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType remap;
- remap.configure(&src, &map_x, &map_y, &dst, policy, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(map_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(map_y.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- map_x.allocator()->allocate();
- map_y.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!map_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!map_y.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0, 0, 255);
- fill(AccessorType(map_x), 1, -5, shape.x() + 5);
- fill(AccessorType(map_y), 2, -5, shape.y() + 5);
-
- // Compute function
- remap.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, InterpolationPolicy policy, DataType data_type, BorderMode border_mode, T constant_border_value)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
- SimpleTensor<float> map_x{ shape, DataType::F32 };
- SimpleTensor<float> map_y{ shape, DataType::F32 };
-
- // Create the valid mask Tensor
- _valid_mask = SimpleTensor<T> { shape, data_type };
-
- // Fill reference
- fill(src, 0, 0, 255);
- fill(map_x, 1, -5, shape.x() + 5);
- fill(map_y, 2, -5, shape.y() + 5);
-
- // Compute reference
- return reference::remap<T>(src, map_x, map_y, _valid_mask, policy, border_mode, constant_border_value);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- SimpleTensor<T> _valid_mask{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_REMAP_FIXTURE */
diff --git a/tests/validation/fixtures/ReorderFixture.h b/tests/validation/fixtures/ReorderFixture.h
new file mode 100644
index 0000000000..8e28484c48
--- /dev/null
+++ b/tests/validation/fixtures/ReorderFixture.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_REORDERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_REORDERFIXTURE_H
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/Reorder.h"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** [ReorderLayer fixture] **/
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReorderValidationFixture : public framework::Fixture
+{
+public:
+ void check_hardware_supports(WeightFormat output_wf)
+ {
+ if(!Scheduler::get().cpu_info().has_sve() && output_wf != WeightFormat::OHWIo4)
+ {
+ _hardware_supports = false;
+ }
+ if(Scheduler::get().cpu_info().has_sve() && arm_gemm::utils::get_vector_length<float>() != 8 && output_wf == WeightFormat::OHWIo8)
+ {
+ _hardware_supports = false;
+ }
+ }
+
+ void setup(TensorShape input_shape, TensorShape output_shape, WeightFormat input_wf, WeightFormat output_wf, DataType data_type)
+ {
+ check_hardware_supports(output_wf);
+ if(_hardware_supports)
+ {
+ _target = compute_target(input_shape, output_shape, input_wf, output_wf, data_type);
+ _reference = compute_reference(input_shape, output_shape, output_wf, data_type);
+ }
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, WeightFormat input_wf, WeightFormat output_wf, DataType data_type)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type);
+
+ // Create and configure function
+ FunctionType reorder;
+
+ reorder.configure(&src, &dst, input_wf, output_wf);
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ reorder.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, WeightFormat output_wf, DataType data_type)
+ {
+ // Create reference
+ SimpleTensor<T> src{ input_shape, data_type };
+
+ // Fill reference
+ fill(src);
+
+ return reference::reorder_layer<T>(src, output_shape, output_wf);
+ }
+
+ bool _hardware_supports = true;
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+/** [ReorderLayer fixture] **/
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_REORDERFIXTURE_H
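Editor's note: the WeightFormat values OHWIo4 and OHWIo8 used by this new fixture denote weight layouts whose output-channel dimension is interleaved in blocks of 4 or 8, which is why check_hardware_supports gates OHWIo8 on the SVE vector length. As a rough illustration of that kind of blocking — assuming only a plain O x I matrix with O divisible by the block size; the exact OHWIo layouts are defined by the library — a sketch:

#include <cstddef>
#include <iostream>
#include <vector>

// Interleave the output-channel (row) dimension of an O x I matrix in groups
// of `block` rows, so that the block members end up adjacent per input element.
std::vector<float> block_interleave(const std::vector<float> &w, std::size_t O, std::size_t I, std::size_t block)
{
    std::vector<float> out;
    out.reserve(O * I);
    for(std::size_t o0 = 0; o0 < O; o0 += block)   // one block of output channels
    {
        for(std::size_t i = 0; i < I; ++i)         // walk the input dimension
        {
            for(std::size_t r = 0; r < block; ++r) // emit the block members together
            {
                out.push_back(w[(o0 + r) * I + i]);
            }
        }
    }
    return out;
}

int main()
{
    const std::vector<float> w = { 0, 1, 2, 3, 4, 5, 6, 7 }; // 4 rows x 2 cols
    for(float v : block_interleave(w, 4, 2, 4))
    {
        std::cout << v << " "; // 0 2 4 6 1 3 5 7
    }
    std::cout << "\n";
}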
diff --git a/tests/validation/fixtures/ReorgLayerFixture.h b/tests/validation/fixtures/ReorgLayerFixture.h
index 3300e0dd7c..f87017190e 100644
--- a/tests/validation/fixtures/ReorgLayerFixture.h
+++ b/tests/validation/fixtures/ReorgLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReorgLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, int32_t stride, DataType data_type, DataLayout data_layout)
{
_target = compute_target(input_shape, stride, data_type, data_layout);
@@ -74,15 +73,15 @@ protected:
reorg.configure(&src, &dst, stride);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
diff --git a/tests/validation/fixtures/ReshapeLayerFixture.h b/tests/validation/fixtures/ReshapeLayerFixture.h
index 22f5b178b3..5be431f8cf 100644
--- a/tests/validation/fixtures/ReshapeLayerFixture.h
+++ b/tests/validation/fixtures/ReshapeLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_RESHAPE_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_RESHAPE_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_RESHAPELAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_RESHAPELAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -31,6 +31,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ReshapeLayer.h"
namespace arm_compute
@@ -41,13 +42,12 @@ namespace validation
{
/** [ReshapeLayer fixture] **/
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ReshapeLayerValidationFixture : public framework::Fixture
+class ReshapeLayerGenericValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type)
+ void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type, bool add_x_padding = false)
{
- _target = compute_target(input_shape, output_shape, data_type);
+ _target = compute_target(input_shape, output_shape, data_type, add_x_padding);
_reference = compute_reference(input_shape, output_shape, data_type);
}
@@ -58,10 +58,10 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type, bool add_x_padding = false)
{
// Check if indeed the input shape can be reshape to the output one
- ARM_COMPUTE_EXPECT(input_shape.total_size() == output_shape.total_size(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input_shape.total_size() == output_shape.total_size());
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, data_type);
@@ -72,15 +72,21 @@ protected:
reshape.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ if(add_x_padding)
+ {
+ // Add random padding in x dimension
+ add_padding_x({ &src, &dst });
+ }
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
@@ -105,8 +111,27 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReshapeLayerValidationFixture : public ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type)
+ {
+ ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, output_shape, data_type);
+ }
+};
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReshapeLayerPaddedValidationFixture : public ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type)
+ {
+ ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, output_shape, data_type, true /* add_x_padding */);
+ }
+};
/** [ReshapeLayer fixture] **/
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_RESHAPE_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_RESHAPELAYERFIXTURE_H
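Editor's note: the new ReshapeLayerPaddedValidationFixture exercises tensors whose rows are padded in x via add_padding_x, so the data is no longer contiguous. A standalone sketch of the access pattern a kernel must then use, stepping between rows by the stride rather than the width (names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

int main()
{
    // Mimics add_padding_x: each row carries extra elements after the real data.
    const std::size_t width = 5, height = 3, pad_x = 3;
    const std::size_t stride = width + pad_x; // elements per row, padding included

    std::vector<uint8_t> buf(stride * height, 0);

    // A correct kernel steps between rows by `stride`, never by `width`.
    for(std::size_t y = 0; y < height; ++y)
    {
        uint8_t *row = buf.data() + y * stride;
        for(std::size_t x = 0; x < width; ++x)
        {
            row[x] = static_cast<uint8_t>(y * width + x);
        }
    }
}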
diff --git a/tests/validation/fixtures/ReverseFixture.h b/tests/validation/fixtures/ReverseFixture.h
index ed5253d041..856bff7b12 100644
--- a/tests/validation/fixtures/ReverseFixture.h
+++ b/tests/validation/fixtures/ReverseFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_REVERSE_FIXTURE
-#define ARM_COMPUTE_TEST_REVERSE_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_REVERSEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_REVERSEFIXTURE_H
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
@@ -45,11 +45,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ReverseValidationFixture : public framework::Fixture
{
public:
- template <typename...>
- void setup(TensorShape shape, TensorShape axis_shape, DataType data_type)
+ void setup(TensorShape shape, TensorShape axis_shape, DataType data_type, bool use_negative_axis = false, bool use_inverted_axis = false)
{
- _target = compute_target(shape, axis_shape, data_type);
- _reference = compute_reference(shape, axis_shape, data_type);
+ _num_dims = shape.num_dimensions();
+ _target = compute_target(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis);
+ _reference = compute_reference(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis);
}
protected:
@@ -58,16 +58,25 @@ protected:
{
library->fill_tensor_uniform(tensor, 0);
}
- std::vector<int> generate_random_axis()
+ std::vector<int32_t> generate_random_axis(bool use_negative = false)
{
- std::vector<int> axis_v = { 0, 1, 2, 3 };
- std::mt19937 g(0);
+ std::vector<int32_t> axis_v;
+ if(use_negative)
+ {
+ axis_v = { -1, -2, -3, -4 };
+ }
+ else
+ {
+ axis_v = { 0, 1, 2, 3 };
+ }
+ axis_v = std::vector<int32_t>(axis_v.begin(), axis_v.begin() + _num_dims);
+ std::mt19937 g(library->seed());
std::shuffle(axis_v.begin(), axis_v.end(), g);
return axis_v;
}
- TensorType compute_target(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type)
+ TensorType compute_target(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type, bool use_negative_axis, bool use_inverted_axis = false)
{
// Create tensors
TensorType src = create_tensor<TensorType>(shape, data_type, 1);
@@ -76,27 +85,27 @@ protected:
// Create and configure function
FunctionType reverse_func;
- reverse_func.configure(&src, &dst, &axis);
+ reverse_func.configure(&src, &dst, &axis, use_inverted_axis);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(axis.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(axis.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
axis.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!axis.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!axis.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
{
auto axis_data = AccessorType(axis);
- auto axis_v = generate_random_axis();
- std::copy(axis_v.begin(), axis_v.begin() + axis_shape.x(), static_cast<int32_t *>(axis_data.data()));
+ auto axis_v = generate_random_axis(use_negative_axis);
+ std::copy(axis_v.begin(), axis_v.begin() + axis_shape.total_size(), static_cast<int32_t *>(axis_data.data()));
}
// Compute function
@@ -105,24 +114,25 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type, bool use_negative_axis, bool use_inverted_axis = false)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type };
- SimpleTensor<uint32_t> axis{ axis_shape, DataType::U32 };
+ SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<int32_t> axis{ axis_shape, DataType::S32 };
// Fill reference
fill(src);
- auto axis_v = generate_random_axis();
- std::copy(axis_v.begin(), axis_v.begin() + axis_shape.x(), axis.data());
+ auto axis_v = generate_random_axis(use_negative_axis);
+ std::copy(axis_v.begin(), axis_v.begin() + axis_shape.total_size(), axis.data());
- return reference::reverse<T>(src, axis);
+ return reference::reverse<T>(src, axis, use_inverted_axis);
}
TensorType _target{};
SimpleTensor<T> _reference{};
+ unsigned int _num_dims{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_REVERSE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_REVERSEFIXTURE_H
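Editor's note: the new use_negative_axis path feeds axes such as -1..-4, which by the usual NumPy-style convention (assumed here, not quoted from the reference implementation) count from the last dimension. A one-function sketch of that wrap:

#include <cassert>

// Assumed convention: negative axes count back from the last dimension.
int wrap_axis(int axis, int rank)
{
    return axis < 0 ? axis + rank : axis;
}

int main()
{
    assert(wrap_axis(-1, 4) == 3); // last dimension
    assert(wrap_axis(-4, 4) == 0); // first dimension
    assert(wrap_axis(2, 4) == 2);  // non-negative axes pass through
}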
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index cf3c5c818f..86d89d71f7 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,15 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SCALE_FIXTURE
-#define ARM_COMPUTE_TEST_SCALE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SCALEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SCALEFIXTURE_H
+
+#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Scale.h"
@@ -44,22 +39,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ScaleValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
- bool align_corners)
+ bool align_corners, bool mixed_layout, QuantizationInfo output_quantization_info)
{
- _shape = shape;
- _policy = policy;
- _border_mode = border_mode;
- _sampling_policy = sampling_policy;
- _data_type = data_type;
- _quantization_info = quantization_info;
- _align_corners = align_corners && _policy == InterpolationPolicy::BILINEAR && _sampling_policy == SamplingPolicy::TOP_LEFT;
+ _shape = shape;
+ _policy = policy;
+ _border_mode = border_mode;
+ _sampling_policy = sampling_policy;
+ _data_type = data_type;
+ _input_quantization_info = quantization_info;
+ _output_quantization_info = output_quantization_info;
+ _align_corners = align_corners;
+ _mixed_layout = mixed_layout;
generate_scale(shape);
- std::mt19937 generator(library->seed());
- std::uniform_int_distribution<uint8_t> distribution_u8(0, 255);
+ std::mt19937 generator(library->seed());
+ std::uniform_int_distribution<uint32_t> distribution_u8(0, 255);
_constant_border_value = static_cast<T>(distribution_u8(generator));
_target = compute_target(shape, data_layout);
@@ -67,6 +63,21 @@ public:
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute the Scale function
+ layer.run();
+
+ // Restore the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
void generate_scale(const TensorShape &shape)
{
static constexpr float _min_scale{ 0.25f };
@@ -74,9 +85,8 @@ protected:
constexpr float max_width{ 8192.0f };
constexpr float max_height{ 6384.0f };
-
- const float min_width = _align_corners ? 2.f : 1.f;
- const float min_height = _align_corners ? 2.f : 1.f;
+ const float min_width{ 1.f };
+ const float min_height{ 1.f };
std::mt19937 generator(library->seed());
std::uniform_real_distribution<float> distribution_float(_min_scale, _max_scale);
@@ -99,9 +109,15 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- if(is_data_type_float(_data_type))
+ if(tensor.data_type() == DataType::F32)
{
- library->fill_tensor_uniform(tensor, 0);
+ std::uniform_real_distribution<float> distribution(-5.0f, 5.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -5.0f, 5.0f };
+ library->fill(tensor, distribution, 0);
}
else if(is_data_type_quantized(tensor.data_type()))
{
@@ -110,9 +126,7 @@ protected:
}
else
{
- // Restrict range for float to avoid any floating point issues
- std::uniform_real_distribution<> distribution(-5.0f, 5.0f);
- library->fill(tensor, distribution, 0);
+ library->fill_tensor_uniform(tensor, 0);
}
}
@@ -125,48 +139,56 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, data_layout);
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
TensorShape shape_scaled(shape);
- shape_scaled.set(idx_width, shape[idx_width] * _scale_x);
- shape_scaled.set(idx_height, shape[idx_height] * _scale_y);
- TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, _quantization_info, data_layout);
+ shape_scaled.set(idx_width, shape[idx_width] * _scale_x, /* apply_dim_correction = */ false);
+ shape_scaled.set(idx_height, shape[idx_height] * _scale_y, /* apply_dim_correction = */ false);
+ TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, _output_quantization_info, data_layout);
// Create and configure function
FunctionType scale;
- scale.configure(&src, &dst, _policy, _border_mode, _constant_border_value, _sampling_policy, /* use_padding */ true, _align_corners);
+ scale.configure(&src, &dst, ScaleKernelInfo{ _policy, _border_mode, _constant_border_value, _sampling_policy, /* use_padding */ false, _align_corners });
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &src, &dst }, data_layout);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
- // Compute function
- scale.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(scale, src, dst);
+ }
+ else
+ {
+ // Compute function
+ scale.run();
+ }
return dst;
}
SimpleTensor<T> compute_reference(const TensorShape &shape)
{
// Create reference
- SimpleTensor<T> src{ shape, _data_type, 1, _quantization_info };
+ SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info };
// Fill reference
fill(src);
- return reference::scale<T>(src, _scale_x, _scale_y, _policy, _border_mode, _constant_border_value, _sampling_policy, /* ceil_policy_scale */ false, _align_corners);
+ return reference::scale<T>(src, _scale_x, _scale_y, _policy, _border_mode, _constant_border_value, _sampling_policy, /* ceil_policy_scale */ false, _align_corners, _output_quantization_info);
}
TensorType _target{};
@@ -177,17 +199,18 @@ protected:
T _constant_border_value{};
SamplingPolicy _sampling_policy{};
DataType _data_type{};
- QuantizationInfo _quantization_info{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
bool _align_corners{ false };
+ bool _mixed_layout{ false };
float _scale_x{ 1.f };
float _scale_y{ 1.f };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ScaleValidationQuantizedFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
bool align_corners)
{
@@ -198,14 +221,35 @@ public:
policy,
border_mode,
sampling_policy,
- align_corners);
+ align_corners,
+ mixed_layout,
+ quantization_info);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class ScaleValidationDifferentOutputQuantizedFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, DataType data_type, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, InterpolationPolicy policy,
+ BorderMode border_mode, SamplingPolicy sampling_policy,
+ bool align_corners)
+ {
+ ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
+ data_type,
+ input_quantization_info,
+ data_layout,
+ policy,
+ border_mode,
+ sampling_policy,
+ align_corners,
+ mixed_layout,
+ output_quantization_info);
+ }
+};
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ScaleValidationFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, bool align_corners)
{
ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
@@ -215,10 +259,12 @@ public:
policy,
border_mode,
sampling_policy,
- align_corners);
+ align_corners,
+ mixed_layout,
+ QuantizationInfo());
}
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SCALE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SCALEFIXTURE_H
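Editor's note: the fixture's align_corners flag changes how output coordinates map back into the source image. Under the common definition (assumed here; the kernel's exact behaviour is defined in the library), align_corners uses an (in - 1) / (out - 1) scale so the first and last samples land exactly on the corner pixels:

#include <iostream>

// Sketch of the usual align_corners coordinate mapping (an assumption, not a
// quote of the library's bilinear scale kernel).
float src_coord(int dst_x, int src_size, int dst_size, bool align_corners)
{
    if(align_corners && dst_size > 1)
    {
        return dst_x * float(src_size - 1) / float(dst_size - 1);
    }
    const float scale = float(src_size) / float(dst_size);
    return (dst_x + 0.5f) * scale - 0.5f; // half-pixel centres otherwise
}

int main()
{
    // With align_corners the last output sample maps exactly onto the last input sample.
    std::cout << src_coord(7, 4, 8, true) << "\n";  // 3 (== src_size - 1)
    std::cout << src_coord(7, 4, 8, false) << "\n"; // 3.25
}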
diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h
new file mode 100644
index 0000000000..af161ef98b
--- /dev/null
+++ b/tests/validation/fixtures/ScatterLayerFixture.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "tests/Globals.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ScatterLayer.h"
+#include "tests/SimpleTensor.h"
+
+#include <random>
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ScatterGenericValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape src_shape, TensorShape updates_shape, TensorShape indices_shape,
+ TensorShape out_shape, DataType data_type, ScatterInfo scatter_info, bool inplace, bool padding,
+ QuantizationInfo src_qinfo = QuantizationInfo(), QuantizationInfo o_qinfo = QuantizationInfo())
+ {
+ // This hash varies the fill seeds across test configurations to improve randomness across tests
+ _hash = src_shape[0] + src_shape[1] + src_shape[2] + src_shape[3] + src_shape[4] + src_shape[5]
+ + updates_shape[0] + updates_shape[1] + updates_shape[2] + updates_shape[3]
+ + updates_shape[4] + updates_shape[5]
+ + indices_shape[0] + indices_shape[1] + indices_shape[2] + indices_shape[3];
+
+ _target = compute_target(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, inplace, padding, src_qinfo, o_qinfo);
+ _reference = compute_reference(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, src_qinfo, o_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ case DataType::F16:
+ {
+ std::uniform_real_distribution<float> distribution(-10.f, 10.f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::S32:
+ case DataType::S16:
+ case DataType::S8:
+ {
+ std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::U32:
+ case DataType::U16:
+ case DataType::U8:
+ {
+ std::uniform_int_distribution<uint32_t> distribution(0, 200);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported data type.");
+ }
+ }
+ }
+
+ // Fills the indices tensor with S32 values.
+ // Bounds are chosen so that the generated values are not exclusively out of range.
+ template <typename U>
+ void fill_indices(U &&tensor, int i, const TensorShape &shape)
+ {
+ // Compute the largest valid index for the shape, then add one so that some out-of-bounds values (relative to the smallest dimension) are also generated
+ const int32_t max = std::min({shape[0], shape[1], shape[2]}) + 1;
+ library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(0), static_cast<int32_t>(max));
+ }
+
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
+ const TensorShape &out_shape, DataType data_type, const ScatterInfo info, bool inplace, bool padding,
+ QuantizationInfo a_qinfo, QuantizationInfo o_qinfo)
+ {
+ // 1. Create relevant tensors using ScatterInfo data structure.
+ // ----------------------------------------------------
+ // In order - src, updates, indices, output.
+ TensorType src = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo);
+ TensorType updates = create_tensor<TensorType>(shape_b, data_type, 1, a_qinfo);
+ TensorType indices = create_tensor<TensorType>(shape_c, DataType::S32, 1, QuantizationInfo());
+ TensorType dst = create_tensor<TensorType>(out_shape, data_type, 1, o_qinfo);
+
+ FunctionType scatter;
+
+ // Configure operator
+ // When scatter_info.zero_initialization is true, pass nullptr for src
+ // because dst does not need to be initialized with src values.
+ if(info.zero_initialization)
+ {
+ scatter.configure(nullptr, &updates, &indices, &dst, info);
+ }
+ else
+ {
+ if(inplace)
+ {
+ scatter.configure(&src, &updates, &indices, &src, info);
+ }
+ else
+ {
+ scatter.configure(&src, &updates, &indices, &dst, info);
+ }
+ }
+
+ // Assertions
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(updates.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(indices.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ if(padding)
+ {
+ add_padding_x({ &src, &updates, &indices});
+
+ if(!inplace)
+ {
+ add_padding_x({ &dst });
+ }
+ }
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ updates.allocator()->allocate();
+ indices.allocator()->allocate();
+
+ if(!inplace)
+ {
+ dst.allocator()->allocate();
+ }
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!updates.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!indices.info()->is_resizable());
+
+ if(!inplace)
+ {
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ // Fill src, updates and indices tensors
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(updates), 1 + _hash);
+ fill_indices(AccessorType(indices), 2 + _hash, out_shape);
+
+ scatter.run();
+
+ if(inplace)
+ {
+ return src;
+ }
+ else
+ {
+ return dst;
+ }
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &c_shape,
+ const TensorShape &out_shape, DataType data_type, ScatterInfo info, QuantizationInfo a_qinfo, QuantizationInfo o_qinfo)
+ {
+ // Output Quantization not currently in use - fixture should be extended to support this.
+ ARM_COMPUTE_UNUSED(o_qinfo);
+ TensorShape src_shape = a_shape;
+ TensorShape updates_shape = b_shape;
+ TensorShape indices_shape = c_shape;
+ const int num_ind_dims = c_shape.num_dimensions();
+
+ // 1. Collapse batch index into a single dim if necessary for update tensor and indices tensor.
+ if(num_ind_dims >= 3)
+ {
+ indices_shape = indices_shape.collapsed_from(1);
+ updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - (num_ind_dims - 1)); // Collapses batch dims
+ }
+
+ // 2. Collapse data dims into a single dim.
+ // Collapse all src dims into 2 dims. First one holding data, the other being the index we iterate over.
+ src_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse all data dims into single dim.
+ src_shape = src_shape.collapsed_from(1); // Collapse all index dims into a single dim
+ updates_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse data dims (all except last dim which is batch dim)
+
+ // Create reference tensors
+ SimpleTensor<T> src{ src_shape, data_type, 1, a_qinfo };
+ SimpleTensor<T> updates{updates_shape, data_type, 1, QuantizationInfo() };
+ SimpleTensor<int32_t> indices{ indices_shape, DataType::S32, 1, QuantizationInfo() };
+
+ // Fill reference
+ fill(src, 0 + _hash);
+ fill(updates, 1 + _hash);
+ fill_indices(indices, 2 + _hash, out_shape);
+
+ // Calculate individual reference using collapsed shapes
+ return reference::scatter_layer<T>(src, updates, indices, out_shape, info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ int32_t _hash{};
+};
+
+// This fixture will use the same shape for updates as indices.
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ScatterValidationFixture : public ScatterGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape src_shape, TensorShape update_shape, TensorShape indices_shape,
+ TensorShape out_shape, DataType data_type, ScatterFunction func, bool zero_init, bool inplace, bool padding)
+ {
+ ScatterGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, update_shape,
+ indices_shape, out_shape, data_type, ScatterInfo(func, zero_init), inplace, padding,
+ QuantizationInfo(), QuantizationInfo());
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H
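For readers following compute_reference() above: the collapsing steps reduce an N-D scatter to a 2-D problem (one data dim, one index dim). A minimal standalone sketch of the same TensorShape collapsing, using hypothetical shapes that are not part of this patch:

    #include "arm_compute/core/TensorShape.h"

    using arm_compute::TensorShape;

    int main()
    {
        // Hypothetical 3-D indices tensor: [index_depth, batch0, batch1]
        TensorShape indices_shape(2U, 3U, 4U);
        indices_shape = indices_shape.collapsed_from(1); // -> [2, 12]: batch dims fused

        // Matching hypothetical updates tensor: [data0, data1, batch0, batch1]
        TensorShape updates_shape(5U, 6U, 3U, 4U);
        // Fuse the (num_ind_dims - 1 == 2) trailing batch dims...
        updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - 2); // -> [5, 6, 12]
        // ...then fuse the data dims, leaving a [data, batch] view.
        updates_shape.collapse(updates_shape.num_dimensions() - 1); // -> [30, 12]
        return 0;
    }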
diff --git a/tests/validation/fixtures/ScharrFixture.h b/tests/validation/fixtures/ScharrFixture.h
index 36b8e98fb8..b54a9d29e6 100644
--- a/tests/validation/fixtures/ScharrFixture.h
+++ b/tests/validation/fixtures/ScharrFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,7 +66,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ScharrValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, BorderMode border_mode, Format format, GradientDimension gradient_dimension)
{
// Generate a random constant value
@@ -120,18 +119,18 @@ protected:
ARM_COMPUTE_ERROR("Gradient dimension not supported");
}
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_y.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst_x.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst_y.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst_x.allocator()->allocate();
dst_y.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_y.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst_x.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst_y.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/SelectFixture.h b/tests/validation/fixtures/SelectFixture.h
index 2ddc97a6cf..8cb6f062f9 100644
--- a/tests/validation/fixtures/SelectFixture.h
+++ b/tests/validation/fixtures/SelectFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,7 +63,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SelectValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, bool has_same_same_rank, DataType data_type)
{
TensorShape condition_shape = detail::select_condition_shape(shape, has_same_same_rank);
@@ -97,10 +96,10 @@ protected:
FunctionType select;
select.configure(&c_t, &x_t, &y_t, &dst_t);
- ARM_COMPUTE_EXPECT(c_t.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(x_t.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(y_t.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_t.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(c_t.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(x_t.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(y_t.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst_t.info()->is_resizable());
// Allocate tensors
c_t.allocator()->allocate();
@@ -108,10 +107,10 @@ protected:
y_t.allocator()->allocate();
dst_t.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!c_t.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!x_t.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!y_t.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_t.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!c_t.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!x_t.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!y_t.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst_t.info()->is_resizable());
// Fill tensors
fill_bool(AccessorType(c_t), 0);
diff --git a/tests/validation/fixtures/SliceOperationsFixtures.h b/tests/validation/fixtures/SliceOperationsFixtures.h
index df016d55e1..b1f91ea2e0 100644
--- a/tests/validation/fixtures/SliceOperationsFixtures.h
+++ b/tests/validation/fixtures/SliceOperationsFixtures.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,6 @@
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
-#include "tests/RawLutAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
@@ -46,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SliceFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, Coordinates starts, Coordinates ends, DataType data_type)
{
_target = compute_target(shape, starts, ends, data_type);
@@ -70,15 +68,15 @@ protected:
FunctionType slice;
slice.configure(&src, &dst, starts, ends);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
@@ -109,7 +107,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class StridedSliceFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape,
Coordinates starts, Coordinates ends, BiStrides strides,
int32_t begin_mask, int32_t end_mask, int32_t shrink_mask,
@@ -139,15 +136,15 @@ protected:
FunctionType strided_slice;
strided_slice.configure(&src, &dst, starts, ends, strides, begin_mask, end_mask, shrink_mask);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
diff --git a/tests/validation/fixtures/SobelFixture.h b/tests/validation/fixtures/SobelFixture.h
deleted file mode 100644
index 2e065e6c8e..0000000000
--- a/tests/validation/fixtures/SobelFixture.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 2017-2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_SOBEL_FIXTURE
-#define ARM_COMPUTE_TEST_SOBEL_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Sobel.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-class CLSobel3x3;
-class CLSobel5x5;
-class CLSobel7x7;
-class NESobel3x3;
-class NESobel5x5;
-class NESobel7x7;
-
-namespace test
-{
-namespace validation
-{
-namespace
-{
-template <typename Function>
-struct info;
-
-template <>
-struct info<NESobel3x3>
-{
- static const Format dst_format = Format::S16;
- static const int filter_size = 3;
-};
-
-template <>
-struct info<CLSobel3x3>
-{
- static const Format dst_format = Format::S16;
- static const int filter_size = 3;
-};
-
-template <>
-struct info<NESobel5x5>
-{
- static const Format dst_format = Format::S16;
- static const int filter_size = 5;
-};
-
-template <>
-struct info<CLSobel5x5>
-{
- static const Format dst_format = Format::S16;
- static const int filter_size = 5;
-};
-
-template <>
-struct info<NESobel7x7>
-{
- static const Format dst_format = Format::S32;
- static const int filter_size = 7;
-};
-
-template <>
-struct info<CLSobel7x7>
-{
- static const Format dst_format = Format::S32;
- static const int filter_size = 7;
-};
-} // namespace
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename U>
-class SobelValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, BorderMode border_mode, Format format, GradientDimension gradient_dimension)
- {
- // Generate a random constant value
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> int_dist(0, 255);
- const uint8_t constant_border_value = int_dist(gen);
-
- _border_mode = border_mode;
- _target = compute_target(shape, border_mode, format, constant_border_value, gradient_dimension);
- _reference = compute_reference(shape, info<FunctionType>::filter_size, border_mode, format, constant_border_value, gradient_dimension);
- }
-
-protected:
- template <typename V>
- void fill(V &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- std::pair<TensorType, TensorType> compute_target(const TensorShape &shape, BorderMode border_mode, Format format, uint8_t constant_border_value, GradientDimension gradient_dimension)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type_from_format(format));
- TensorType dst_x = create_tensor<TensorType>(shape, data_type_from_format(info<FunctionType>::dst_format));
- TensorType dst_y = create_tensor<TensorType>(shape, data_type_from_format(info<FunctionType>::dst_format));
-
- src.info()->set_format(format);
- dst_x.info()->set_format(info<FunctionType>::dst_format);
- dst_y.info()->set_format(info<FunctionType>::dst_format);
-
- FunctionType sobel;
-
- switch(gradient_dimension)
- {
- case GradientDimension::GRAD_X:
- sobel.configure(&src, &dst_x, nullptr, border_mode, constant_border_value);
- break;
- case GradientDimension::GRAD_Y:
- sobel.configure(&src, nullptr, &dst_y, border_mode, constant_border_value);
- break;
- case GradientDimension::GRAD_XY:
- sobel.configure(&src, &dst_x, &dst_y, border_mode, constant_border_value);
- break;
- default:
- ARM_COMPUTE_ERROR("Gradient dimension not supported");
- }
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst_y.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst_x.allocator()->allocate();
- dst_y.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_x.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst_y.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- sobel.run();
-
- return std::make_pair(std::move(dst_x), std::move(dst_y));
- }
-
- std::pair<SimpleTensor<U>, SimpleTensor<U>> compute_reference(const TensorShape &shape, int filter_size, BorderMode border_mode, Format format, uint8_t constant_border_value,
- GradientDimension gradient_dimension)
- {
- // Create reference
- SimpleTensor<T> src{ shape, format };
-
- // Fill reference
- fill(src);
-
- return reference::sobel<U>(src, filter_size, border_mode, constant_border_value, gradient_dimension);
- }
-
- BorderMode _border_mode{ BorderMode::UNDEFINED };
- std::pair<TensorType, TensorType> _target{};
- std::pair<SimpleTensor<U>, SimpleTensor<U>> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SOBEL_FIXTURE */
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index aeff777776..f4bf8df9c0 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,6 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/LogSoftmaxLayer.h"
#include "tests/validation/reference/SoftmaxLayer.h"
#include <random>
@@ -47,29 +46,37 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SoftmaxValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta, size_t axis)
{
_quantization_info = quantization_info;
- _target = compute_target(shape, data_type, quantization_info, beta, axis);
_reference = compute_reference(shape, data_type, quantization_info, beta, axis);
+ _target = compute_target(shape, data_type, quantization_info, beta, axis);
}
protected:
template <typename U>
void fill(U &&tensor)
{
- if(!is_data_type_quantized(tensor.data_type()))
+ if(tensor.data_type() == DataType::F32)
{
- std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
+ std::uniform_real_distribution<float> distribution(-10.0f, 10.0f);
library->fill(tensor, distribution, 0);
}
- else // data type is quantized_asymmetric (signed or unsigned)
+ else if(tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -10.0f, 10.0f };
+ library->fill(tensor, distribution, 0);
+ }
+ else if(!is_data_type_quantized(tensor.data_type()))
{
std::uniform_int_distribution<> distribution(0, 100);
library->fill(tensor, distribution, 0);
}
+ else
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
}
TensorType compute_target(const TensorShape &shape, DataType data_type,
@@ -83,15 +90,15 @@ protected:
FunctionType smx_layer;
smx_layer.configure(&src, &dst, beta, axis);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -111,14 +118,7 @@ protected:
// Fill reference
fill(src);
- if(IS_LOG)
- {
- return reference::log_softmax_layer<T>(src, beta, axis);
- }
- else
- {
- return reference::softmax_layer<T>(src, beta, axis);
- }
+ return reference::softmax_layer<T>(src, beta, axis, IS_LOG);
}
TensorType _target{};
@@ -130,7 +130,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SoftmaxValidationFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, float beta, size_t axis)
{
SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG>::setup(shape,
@@ -145,7 +144,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SoftmaxValidationQuantizedFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG>
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta, size_t axis)
{
SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG>::setup(shape,
@@ -155,6 +153,7 @@ public:
axis);
}
};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
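The reference call above now folds the log variant into softmax_layer through the IS_LOG template flag instead of dispatching to a separate log_softmax_layer. A minimal sketch of the relationship the flag encodes (standalone code, not the library's reference implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // softmax(x)_i = exp(beta * (x_i - max)) / sum_j exp(beta * (x_j - max));
    // log-softmax is its element-wise logarithm, computed in a numerically stable way.
    std::vector<float> softmax(const std::vector<float> &x, float beta, bool is_log)
    {
        const float max_val = *std::max_element(x.begin(), x.end());
        std::vector<float> y(x.size());
        float sum = 0.f;
        for(std::size_t i = 0; i < x.size(); ++i)
        {
            y[i] = beta * (x[i] - max_val);
            sum += std::exp(y[i]);
        }
        for(float &v : y)
        {
            v = is_log ? v - std::log(sum) : std::exp(v) / sum;
        }
        return y;
    }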
diff --git a/tests/validation/fixtures/SpaceToBatchFixture.h b/tests/validation/fixtures/SpaceToBatchFixture.h
index d88ecb934a..964e511301 100644
--- a/tests/validation/fixtures/SpaceToBatchFixture.h
+++ b/tests/validation/fixtures/SpaceToBatchFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SpaceToBatchLayerValidationGenericFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
{
@@ -79,10 +78,10 @@ protected:
FunctionType space_to_batch;
space_to_batch.configure(&input, &block_shape, &paddings, &output);
- ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(paddings.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(block_shape.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(paddings.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
// Allocate tensors
input.allocator()->allocate();
@@ -90,10 +89,10 @@ protected:
paddings.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!block_shape.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!paddings.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!block_shape.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!paddings.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
fill(AccessorType(input), 0);
@@ -140,7 +139,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SpaceToBatchLayerValidationFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
DataType data_type, DataLayout data_layout)
{
@@ -152,7 +150,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SpaceToBatchLayerValidationQuantizedFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
{
diff --git a/tests/validation/fixtures/SpaceToDepthFixture.h b/tests/validation/fixtures/SpaceToDepthFixture.h
index 170fdfa397..2d2e9fad7d 100644
--- a/tests/validation/fixtures/SpaceToDepthFixture.h
+++ b/tests/validation/fixtures/SpaceToDepthFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#ifndef ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
@@ -39,7 +40,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class SpaceToDepthLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape output_shape, const int block_shape, DataType data_type, DataLayout data_layout)
{
_target = compute_target(input_shape, output_shape, block_shape, data_type, data_layout);
@@ -50,7 +50,10 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.0f), T(1.0f) };
library->fill(tensor, distribution, i);
}
TensorType compute_target(TensorShape input_shape, TensorShape output_shape, const int block_shape,
@@ -66,19 +69,25 @@ protected:
TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
+ auto calc_out_shape = misc::shape_calculator::compute_space_to_depth_shape(input.info(), block_shape);
+ ARM_COMPUTE_ASSERT(output_shape[0] == calc_out_shape[0]);
+ ARM_COMPUTE_ASSERT(output_shape[1] == calc_out_shape[1]);
+ ARM_COMPUTE_ASSERT(output_shape[2] == calc_out_shape[2]);
+ ARM_COMPUTE_ASSERT(output_shape[3] == calc_out_shape[3]);
+
// Create and configure function
FunctionType space_to_depth;
space_to_depth.configure(&input, &output, block_shape);
- ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
// Allocate tensors
input.allocator()->allocate();
output.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
// Fill tensors
fill(AccessorType(input), 0);
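The new assertions cross-check the dataset's output shape against the library's ShapeCalculator helper. For intuition, a sketch of the shape rule being asserted, with hypothetical shapes in [W, H, C, N] order (this helper is illustrative, not the library's):

    #include <array>

    // Space-to-depth: spatial dims shrink by the block size, channels grow by block^2.
    std::array<unsigned int, 4> space_to_depth_shape(std::array<unsigned int, 4> in, unsigned int block)
    {
        return { in[0] / block, in[1] / block, in[2] * block * block, in[3] };
    }

    // e.g. an input of [4, 4, 2, 1] with block = 2 yields [2, 2, 8, 1]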
diff --git a/tests/validation/fixtures/SplitFixture.h b/tests/validation/fixtures/SplitFixture.h
index d2336ab2d9..203925329c 100644
--- a/tests/validation/fixtures/SplitFixture.h
+++ b/tests/validation/fixtures/SplitFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,6 @@
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
-#include "tests/RawLutAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
@@ -48,7 +47,6 @@ template <typename TensorType, typename ITensorType, typename AccessorType, type
class SplitFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, unsigned int axis, unsigned int splits, DataType data_type)
{
_target = compute_target(shape, axis, splits, data_type);
@@ -77,7 +75,7 @@ protected:
FunctionType split;
split.configure(&src, dsts_ptr, axis);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
{
return t.info()->is_resizable();
@@ -91,7 +89,7 @@ protected:
dsts[i].allocator()->allocate();
}
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
{
return !t.info()->is_resizable();
@@ -150,7 +148,6 @@ template <typename TensorType, typename ITensorType, typename AccessorType, type
class SplitShapesFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type)
{
_target = compute_target(shape, axis, split_shapes, data_type);
@@ -186,7 +183,7 @@ protected:
FunctionType split;
split.configure(&src, dsts_ptr, axis);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
{
return t.info()->is_resizable();
@@ -200,7 +197,7 @@ protected:
dsts[i].allocator()->allocate();
}
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
{
return !t.info()->is_resizable();
diff --git a/tests/validation/fixtures/StackLayerFixture.h b/tests/validation/fixtures/StackLayerFixture.h
index cf055b586e..7dd8fe47dc 100644
--- a/tests/validation/fixtures/StackLayerFixture.h
+++ b/tests/validation/fixtures/StackLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_STACKLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_STACKLAYERFIXTURE_H
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
@@ -52,10 +52,9 @@ template <typename TensorType, typename AbstractTensorType, typename AccessorTyp
class StackLayerValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
{
- _target = compute_target(shape_src, axis, data_type, num_tensors);
+ _target = compute_target(shape_src, axis, data_type, num_tensors, false /* add_x_padding */);
_reference = compute_reference(shape_src, axis, data_type, num_tensors);
}
@@ -66,7 +65,7 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+ TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors, bool add_x_padding)
{
std::vector<TensorType> tensors(num_tensors);
std::vector<AbstractTensorType *> src(num_tensors);
@@ -76,7 +75,7 @@ protected:
{
tensors[i] = create_tensor<TensorType>(shape_src, data_type);
src[i] = &(tensors[i]);
- ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(tensors[i].info()->is_resizable());
}
// Create tensors
@@ -91,18 +90,28 @@ protected:
// Allocate and fill the input tensors
for(int i = 0; i < num_tensors; ++i)
{
- ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+ if(add_x_padding)
+ {
+ add_padding_x({&tensors[i]}, DataLayout::NHWC);
+ }
+
+ ARM_COMPUTE_ASSERT(tensors[i].info()->is_resizable());
tensors[i].allocator()->allocate();
- ARM_COMPUTE_EXPECT(!tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!tensors[i].info()->is_resizable());
// Fill input tensor
fill(AccessorType(tensors[i]), i);
}
+ if(add_x_padding)
+ {
+ add_padding_x({&dst}, DataLayout::NHWC);
+ }
+
// Allocate output tensor
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Compute stack function
stack.run();
@@ -132,7 +141,21 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
};
+
+template <typename TensorType, typename AbstractTensorType, typename AccessorType, typename FunctionType, typename T>
+class StackLayerWithPaddingValidationFixture :
+ public StackLayerValidationFixture<TensorType, AbstractTensorType, AccessorType, FunctionType, T>
+{
+public:
+ using Parent = StackLayerValidationFixture<TensorType, AbstractTensorType, AccessorType, FunctionType, T>;
+
+ void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+ {
+ Parent::_target = Parent::compute_target(shape_src, axis, data_type, num_tensors, true /* add_x_padding */);
+ Parent::_reference = Parent::compute_reference(shape_src, axis, data_type, num_tensors);
+ }
+};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_STACKLAYERFIXTURE_H
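StackLayerWithPaddingValidationFixture reuses compute_target() with add_x_padding = true so the kernels are also exercised on tensors whose rows are not contiguous in memory. A rough sketch of what such X padding amounts to, assuming add_padding_x ultimately extends the tensor's padding before allocation (the amounts here are hypothetical):

    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/Types.h"

    // Extend a tensor's padding on the X axis before allocation so that
    // consecutive rows are separated by unused elements.
    void pad_x(arm_compute::ITensor &t, unsigned int left, unsigned int right)
    {
        t.info()->extend_padding(arm_compute::PaddingSize(0 /* top */, right, 0 /* bottom */, left));
    }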
diff --git a/tests/validation/fixtures/TableLookupFixture.h b/tests/validation/fixtures/TableLookupFixture.h
deleted file mode 100644
index 6886c7eca4..0000000000
--- a/tests/validation/fixtures/TableLookupFixture.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_TABLE_LOOKUP_FIXTURE
-#define ARM_COMPUTE_TEST_TABLE_LOOKUP_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/RawLutAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/TableLookup.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename LutAccessorType, typename LutType, typename T>
-class TableLookupValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type)
- {
- _target = compute_target(shape, data_type);
- _reference = compute_reference(shape, data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type)
- {
- // Create Lut
- const int num_elem = (data_type == DataType::U8) ? std::numeric_limits<uint8_t>::max() + 1 : std::numeric_limits<int16_t>::max() - std::numeric_limits<int16_t>::lowest() + 1;
- LutType lut(num_elem, data_type);
-
- //Fill the Lut
- fill_lookuptable(LutAccessorType(lut));
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType table_lookup;
- table_lookup.configure(&src, &lut, &dst);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0);
- fill(AccessorType(dst), 1);
-
- // Compute function
- table_lookup.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type)
- {
- // Create rawLut
- std::map<T, T> rawlut;
-
- // Fill the Lut
- fill_lookuptable(RawLutAccessor<T>(rawlut));
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src, 0);
-
- return reference::table_lookup(src, rawlut);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_TABLE_LOOKUP_FIXTURE */
diff --git a/tests/validation/fixtures/ThresholdFixture.h b/tests/validation/fixtures/ThresholdFixture.h
deleted file mode 100644
index 9a92175728..0000000000
--- a/tests/validation/fixtures/ThresholdFixture.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_THRESHOLD_FIXTURE
-#define ARM_COMPUTE_TEST_THRESHOLD_FIXTURE
-
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Threshold.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ThresholdValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, uint8_t threshold, uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper, DataType data_type)
- {
- _target = compute_target(shape, data_type, threshold, false_value, true_value, type, upper);
- _reference = compute_reference(shape, data_type, threshold, false_value, true_value, type, upper);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type,
- uint8_t threshold, uint8_t false_value, uint8_t true_value,
- ThresholdType type, uint8_t upper)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType thrsh;
- thrsh.configure(&src, &dst, threshold, false_value, true_value, type, upper);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- thrsh.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
- uint8_t threshold, uint8_t false_value, uint8_t true_value,
- ThresholdType type, uint8_t upper)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Fill reference
- fill(src);
-
- return reference::threshold<T>(src, threshold, false_value, true_value, type, upper);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_THRESHOLD_FIXTURE */
diff --git a/tests/validation/fixtures/TileFixture.h b/tests/validation/fixtures/TileFixture.h
index cb70a6c160..979eee5ab1 100644
--- a/tests/validation/fixtures/TileFixture.h
+++ b/tests/validation/fixtures/TileFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class TileValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type, const Multiples &multiples)
{
_target = compute_target(shape, data_type, multiples);
@@ -68,15 +67,15 @@ protected:
FunctionType tile_func;
tile_func.configure(&src, &dst, multiples);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
diff --git a/tests/validation/fixtures/TransposeFixture.h b/tests/validation/fixtures/TransposeFixture.h
index c798162a71..212c76cc9a 100644
--- a/tests/validation/fixtures/TransposeFixture.h
+++ b/tests/validation/fixtures/TransposeFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_TRANSPOSE_FIXTURE
-#define ARM_COMPUTE_TEST_TRANSPOSE_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_TRANSPOSEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_TRANSPOSEFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -32,7 +32,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Transpose.h"
+#include "tests/validation/reference/Permute.h"
namespace arm_compute
{
@@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class TransposeValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape, DataType data_type)
{
_target = compute_target(shape, data_type);
@@ -71,15 +70,15 @@ protected:
FunctionType trans_func;
trans_func.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src));
@@ -98,7 +97,7 @@ protected:
// Fill reference
fill(src);
- return reference::transpose<T>(src);
+ return reference::permute<T>(src, PermutationVector(1U, 0U));
}
TensorType _target{};
@@ -107,4 +106,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_TRANSPOSE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_TRANSPOSEFIXTURE_H
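With the dedicated transpose reference removed, the fixture validates against the generic permute with PermutationVector(1U, 0U). For a 2-D tensor the two are the same operation, as this standalone sketch illustrates (hypothetical row-major buffer, not the library's reference):

    #include <cstddef>
    #include <vector>

    // out(x, y) = in(y, x): swapping the two axes is exactly PermutationVector(1, 0)
    std::vector<float> transpose2d(const std::vector<float> &in, std::size_t w, std::size_t h)
    {
        std::vector<float> out(w * h);
        for(std::size_t y = 0; y < h; ++y)
        {
            for(std::size_t x = 0; x < w; ++x)
            {
                out[x * h + y] = in[y * w + x];
            }
        }
        return out;
    }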
diff --git a/tests/validation/fixtures/UNIT/ContextFixture.h b/tests/validation/fixtures/UNIT/ContextFixture.h
new file mode 100644
index 0000000000..77cbc12320
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/ContextFixture.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_CONTEXT_FIXTURE
+#define ARM_COMPUTE_TEST_UNIT_CONTEXT_FIXTURE
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test-case for AclDestroyContext
+ *
+ * Validate that AclDestroyContext behaves as expected when invalid inputs are given as context
+ *
+ * Test Steps:
+ * - Call AclDestroyContext with a null context
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyContext on an object that is not an AclContext (an empty array)
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the null context is still nullptr
+ * - Destroy a valid context and confirm that AclSuccess is reported
+ */
+template <AclTarget Target>
+class DestroyInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclContext ctx = nullptr;
+ std::array<char, 256> empty_array{};
+ AclContext valid_ctx = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateContext(&valid_ctx, Target, nullptr) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(reinterpret_cast<AclContext>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(ctx == nullptr);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(valid_ctx) == AclStatus::AclSuccess);
+ };
+};
+
+/** Test-case for AclCreateContext and AclDestroyContext
+ *
+ * Validate that a context can be created and destroyed through the C API
+ *
+ * Test Steps:
+ * - Call AclCreateContext with valid target
+ * - Confirm that context is not nullptr and error code is AclSuccess
+ * - Destroy context
+ * - Confirm that AclSuccess is reported
+ */
+template <AclTarget Target>
+class SimpleContextCApiFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclContext ctx = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateContext(&ctx, Target, nullptr) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(ctx != nullptr);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclSuccess);
+ };
+};
+
+/** Test-case for Context from the C++ interface
+ *
+ * Test Steps:
+ * - Create a Context object
+ * - Confirm that StatusCode::Success is reported
+ * - Confirm that equality operator works
+ * - Confirm that inequality operator works
+ */
+template <acl::Target Target>
+class SimpleContextCppApiFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode status = acl::StatusCode::Success;
+ acl::Context ctx(Target, &status);
+ ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
+
+ auto ctx_eq = ctx;
+ ARM_COMPUTE_ASSERT(ctx_eq == ctx);
+
+ acl::Context ctx_ineq(Target, &status);
+ ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success);
+ ARM_COMPUTE_ASSERT(ctx_ineq != ctx);
+ };
+};
+
+/** Test-case for multiple contexts
+ *
+ * Validate that AclCreateContext can create/destroy multiple contexts
+ *
+ * Test Steps:
+ * - Call AclCreateContext multiple times for the given target
+ * - Confirm that AclSuccess is reported and each context is not nullptr
+ * - Destroy all contexts
+ * - Confirm that AclSuccess is reported
+ */
+template <AclTarget Target>
+class MultipleContextsFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ const unsigned int num_tests = 5;
+ std::array<AclContext, num_tests> ctxs{};
+ for(unsigned int i = 0; i < num_tests; ++i)
+ {
+ ARM_COMPUTE_ASSERT(AclCreateContext(&ctxs[i], Target, nullptr) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(ctxs[i] != nullptr);
+ ARM_COMPUTE_ASSERT(AclDestroyContext(ctxs[i]) == AclStatus::AclSuccess);
+ }
+ };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_CONTEXT_FIXTURE */
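Each of these fixtures carries its whole test in setup(). Presumably they are instantiated from a test-suite file via the framework's fixture macros, along these lines (a hypothetical registration, not part of this patch):

    // e.g. in a CPU unit-test suite (hypothetical):
    EMPTY_BODY_FIXTURE_TEST_CASE(DestroyInvalidContext,
                                 DestroyInvalidContextFixture<AclTarget::AclCpu>,
                                 framework::DatasetMode::ALL)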
diff --git a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
index 86380509ed..3e96dcbf2d 100644
--- a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
+++ b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,14 +74,14 @@ public:
void validate(bool validate_finalized) const
{
- ARM_COMPUTE_EXPECT(mm->pool_manager() != nullptr, framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mm->lifetime_manager() != nullptr, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mm->pool_manager() != nullptr);
+ ARM_COMPUTE_ASSERT(mm->lifetime_manager() != nullptr);
if(validate_finalized)
{
- ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized());
}
- ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == num_pools, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == num_pools);
}
AllocatorType allocator;
@@ -127,7 +127,6 @@ class DynamicTensorType3SingleFunction : public framework::Fixture
using T = float;
public:
- template <typename...>
void setup(TensorShape input_level0, TensorShape input_level1)
{
input_l0 = input_level0;
@@ -159,15 +158,15 @@ protected:
SimpleFunctionWrapperType layer(serv_internal.mm);
layer.configure(&src, &dst);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Populate and validate memory manager
serv_cross.populate(num_pools);
@@ -251,7 +250,6 @@ class DynamicTensorType3ComplexFunction : public framework::Fixture
using T = float;
public:
- template <typename...>
void setup(std::vector<TensorShape> input_shapes, TensorShape weights_shape, TensorShape bias_shape, std::vector<TensorShape> output_shapes, PadStrideInfo info)
{
num_iterations = input_shapes.size();
@@ -264,7 +262,7 @@ public:
_info = info;
// Create function
- _f_target = support::cpp14::make_unique<ComplexFunctionType>(_ms.mm);
+ _f_target = std::make_unique<ComplexFunctionType>(_ms.mm);
}
void run_iteration(unsigned int idx)
@@ -313,8 +311,8 @@ protected:
// Create and configure function
_f_target->configure(&src, &_weights_target, &_bias_target, &dst, info, weights_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
@@ -322,8 +320,8 @@ protected:
_weights_target.allocator()->allocate();
_bias_target.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
@@ -390,7 +388,6 @@ class DynamicTensorType2PipelineFunction : public framework::Fixture
using T = float;
public:
- template <typename...>
void setup(std::vector<TensorShape> input_shapes)
{
_data_type = DataType::F32;
@@ -408,7 +405,7 @@ protected:
{
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -425,7 +422,7 @@ protected:
for(unsigned int i = 0; i < num_functions; ++i)
{
- _functions.emplace_back(support::cpp14::make_unique<ComplexFunctionType>(_ms.mm));
+ _functions.emplace_back(std::make_unique<ComplexFunctionType>(_ms.mm));
}
for(unsigned int i = 0; i < num_resizes; ++i)
diff --git a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
index 01f9092da5..3bc4844bd5 100644
--- a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
+++ b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,8 +100,8 @@ protected:
// Finalize memory manager
mm->populate(_allocator, 1 /* num_pools */);
- ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized());
+ ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == 1);
// Fill tensors
fill(AccessorType(src), 0);
@@ -206,8 +206,8 @@ protected:
// Finalize memory manager
mm->populate(_allocator, 1 /* num_pools */);
- ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized());
+ ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == 1);
// Fill tensors (1st iteration)
fill(AccessorType(src), 0);
@@ -340,8 +340,8 @@ protected:
// Finalize memory manager
mm->populate(_allocator, 1 /* num_pools */);
- ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized());
+ ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == 1);
// Fill tensors (1st iteration)
fill(AccessorType(src), 0);
diff --git a/tests/validation/fixtures/UNIT/QueueFixture.h b/tests/validation/fixtures/UNIT/QueueFixture.h
new file mode 100644
index 0000000000..bc93f5f120
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/QueueFixture.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_QUEUE_FIXTURE
+#define ARM_COMPUTE_TEST_UNIT_QUEUE_FIXTURE
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test case for AclCreateQueue
+ *
+ * Validate that AclCreateQueue behaves as expected with invalid context
+ *
+ * Test Steps:
+ * - Call AclCreateQueue with an invalid context
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the queue is still nullptr
+ */
+class CreateQueueWithInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclQueue queue = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateQueue(&queue, nullptr, nullptr) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(queue == nullptr);
+ };
+};
+
+/** Test-case for AclCreateQueue
+ *
+ * Validate that AclCreateQueue behaves as expected with invalid options
+ *
+ * Test Steps:
+ * - Call AclCreateQueue with valid context but invalid options
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that queue is still nullptr
+ */
+template <acl::Target Target>
+class CreateQueuerWithInvalidOptionsFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+
+ // Check invalid tuning mode
+ AclQueueOptions invalid_queue_opts;
+ invalid_queue_opts.mode = static_cast<AclTuningMode>(-1);
+
+ AclQueue queue = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateQueue(&queue, ctx.get(), &invalid_queue_opts) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(queue == nullptr);
+ };
+};
+
+/** Test case for AclDestroyQueue
+ *
+ * Validate that AclDestroyQueue behaves as expected when an invalid queue is given
+ *
+ * Test Steps:
+ * - Call AclDestroyQueue with a null queue
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyQueue on an empty array
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyQueue on an ACL object other than AclQueue
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the queue is still nullptr
+ */
+template <acl::Target Target>
+class DestroyInvalidQueueFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+
+ std::array<char, 256> empty_array{};
+ AclQueue queue = nullptr;
+
+ ARM_COMPUTE_ASSERT(AclDestroyQueue(queue) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyQueue(reinterpret_cast<AclQueue>(ctx.get())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyQueue(reinterpret_cast<AclQueue>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(queue == nullptr);
+ };
+};
+
+/** Test case for AclCreateQueue
+ *
+ * Validate that a queue can be created successfully
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid queue
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class SimpleQueueFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::Queue queue(ctx, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ };
+};
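+
+/* Usage sketch (illustrative only): these fixtures run entirely inside
+ * setup(), so a test translation unit would typically just register them.
+ * The registration macro name below is an assumption made for illustration
+ * and may differ from the framework's actual macro:
+ *
+ *   EMPTY_BODY_FIXTURE_TEST_CASE(CreateQueueWithInvalidContext,
+ *                                CreateQueueWithInvalidContextFixture,
+ *                                framework::DatasetMode::ALL)
+ */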
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_QUEUE_FIXTURE */
diff --git a/tests/validation/fixtures/UNIT/TensorFixture.h b/tests/validation/fixtures/UNIT/TensorFixture.h
new file mode 100644
index 0000000000..bfe115b3ed
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/TensorFixture.h
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_TENSOR_FIXTURE
+#define ARM_COMPUTE_TEST_UNIT_TENSOR_FIXTURE
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test case for AclCreateTensor
+ *
+ * Validate that AclCreateTensor behaves as expected with an invalid context
+ *
+ * Test Steps:
+ * - Call AclCreateTensor with an invalid context
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor is still nullptr
+ */
+class CreateTensorWithInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclTensor tensor = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, nullptr, nullptr, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+ };
+};
+
+/** Test case for AclCreateTensor
+ *
+ * Validate that AclCreateTensor behaves as expected with an invalid descriptor
+ *
+ * Test Steps:
+ * - Call AclCreateTensor with valid context but invalid descriptor
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor is still nullptr
+ */
+template <acl::Target Target>
+class CreateTensorWithInvalidDescriptorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+ AclTensor tensor = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), nullptr, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+
+ // Check invalid data type
+ AclTensorDescriptor invalid_desc;
+ invalid_desc.ndims = 4;
+ invalid_desc.data_type = static_cast<AclDataType>(-1);
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), &invalid_desc, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+
+ // Check invalid number of dimensions
+ invalid_desc.data_type = AclDataType::AclFloat32;
+ invalid_desc.ndims = 15;
+ ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), &invalid_desc, false) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+ };
+};
+
+/** Test case for AclDestroyTensor
+ *
+ * Validate that AclDestroyTensor behaves as expected when an invalid tensor is given
+ *
+ * Test Steps:
+ * - Call AclDestroyTensor with a null tensor
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensor on an empty array
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensor on an ACL object other than AclTensor
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor is still nullptr
+ */
+template <acl::Target Target>
+class DestroyInvalidTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+
+ std::array<char, 256> empty_array{};
+ AclTensor tensor = nullptr;
+
+ ARM_COMPUTE_ASSERT(AclDestroyTensor(tensor) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensor(reinterpret_cast<AclTensor>(ctx.get())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensor(reinterpret_cast<AclTensor>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(tensor == nullptr);
+ };
+};
+
+/** Test case for AclCreateTensor
+ *
+ * Validate that a tensor can be created successfully
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class SimpleTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ };
+};
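+
+/* C-level sketch of the creation path probed by the invalid-argument
+ * fixtures above, shown here on the happy path. The descriptor fields are
+ * the ones this file already touches (ndims, shape, data_type); the shape
+ * buffer is left as a comment since its exact layout is an assumption:
+ *
+ *   AclTensor c_tensor = nullptr;
+ *   AclTensorDescriptor desc{};
+ *   desc.ndims     = 2;
+ *   desc.data_type = AclDataType::AclFloat32;
+ *   // desc.shape would point at a 2-element dimension array
+ *   // AclCreateTensor(&c_tensor, ctx.get(), &desc, true /* allocate */) should return AclSuccess
+ */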
+
+/** Test case for AclTensor
+ *
+ * Validate that multiple tensors can be created successfully,
+ * stressing the allocator to surface potential memory leaks
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a large number of tensors
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class TensorStressFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ const unsigned int num_tensors = 1024;
+ for(unsigned int i = 0; i < num_tensors; ++i)
+ {
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 1024, 1024 }, acl::DataType::Float32), &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ }
+ };
+};
+
+/** Test case for AclMapTensor
+ *
+ * Validate that map on an invalid object fails
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Pass an invalid object for mapping
+ * - Confirm that AclInvalidArgument is returned
+ */
+template <acl::Target Target>
+class MapInvalidTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ void *handle = nullptr;
+ ARM_COMPUTE_ASSERT(AclMapTensor(reinterpret_cast<AclTensor>(ctx.get()), &handle) == AclStatus::AclInvalidArgument);
+ };
+};
+
+/** Test case for AclMapTensor
+ *
+ * Validate that mapping an unallocated tensor returns nullptr
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor without allocating
+ * - Map tensor
+ * - Check that mapping is nullptr
+ */
+template <acl::Target Target>
+class MapNotAllocatedTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), false /* allocate */, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ ARM_COMPUTE_ASSERT(tensor.map() == nullptr);
+ };
+};
+
+/** Test case for AclMapTensor
+ *
+ * Validate that mapping a valid tensor returns a non-nullptr value
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor while allocating
+ * - Map tensor
+ * - Check that mapping is not nullptr
+ */
+template <acl::Target Target>
+class MapAllocatedTensorFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ void *handle = tensor.map();
+ ARM_COMPUTE_ASSERT(handle != nullptr);
+ ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success);
+ };
+};
+
+/** Test case for AclTensorImport
+ *
+ * Validate that externally allocated memory can be successfully imported
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor without allocating
+ * - Allocate external memory
+ * - Import memory to the tensor
+ * - Check that imported pointer matches
+ */
+template <acl::Target Target>
+class ImportMemoryFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ const int32_t size = 8;
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ size }, acl::DataType::Float32), false /* allocate */, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ std::vector<float> data(size);
+ err = tensor.import(data.data(), acl::ImportType::Host);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ void *handle = tensor.map();
+ ARM_COMPUTE_ASSERT(handle == data.data());
+ ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success);
+ }
+};
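+
+/* Equivalent C-level sketch of the import path exercised above. Only
+ * AclTensorImport itself is named by this test case; the import-type enum
+ * name is an assumption made for illustration:
+ *
+ *   std::vector<float> data(size);
+ *   AclTensorImport(tensor.get(), data.data(), AclImportMemoryType::AclHostPtr); // enum name assumed
+ */
+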
+/** Test case for the get_size() interface of Tensor
+ *
+ * Validate that get_size() returns the expected size in bytes
+ *
+ * Test Steps:
+ * - Create a valid context
+ * - Create a valid tensor
+ * - Compare the returned size value with the expected value
+ */
+template <acl::Target Target>
+class TensorSizeFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+
+ // size should be 6 elements (2x3) times 4 bytes (float32) = 24 bytes
+ constexpr size_t expected_size = 24;
+ ARM_COMPUTE_ASSERT(tensor.get_size() == expected_size);
+ };
+};
+
+/** Test case for get_size() dealing with invalid arguments
+ *
+ * Test Steps:
+ * - Confirm that a null tensor returns the correct error
+ * - Create a valid tensor
+ * - Confirm that the C interface returns the correct error for a null size argument
+ */
+template <acl::Target Target>
+class InvalidTensorSizeFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ // Null tensor
+ AclTensor null_tensor = nullptr;
+ uint64_t size{ 0 };
+ ARM_COMPUTE_ASSERT(AclGetTensorSize(null_tensor, &size) == AclStatus::AclInvalidArgument);
+
+ // Create valid tensor
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+
+ // Null size argument
+ ARM_COMPUTE_ASSERT(AclGetTensorSize(tensor.get(), nullptr) == AclStatus::AclInvalidArgument);
+ };
+};
+
+/** Test case for AclGetTensorDescriptor
+ *
+ * Validate that the descriptor retrieved from a tensor matches the descriptor it was created with
+ *
+ * Test Steps:
+ * - Create a valid context and a tensor from a known descriptor
+ * - Retrieve the descriptor through both the C++ and C interfaces
+ * - Confirm that the retrieved descriptors match the original
+ */
+template <acl::Target Target>
+class DescriptorConversionFixture : public framework::Fixture
+{
+ bool compare_descriptor(const AclTensorDescriptor &desc_a, const AclTensorDescriptor &desc_b)
+ {
+ auto are_descriptors_same = true;
+
+ are_descriptors_same &= desc_a.ndims == desc_b.ndims;
+ are_descriptors_same &= desc_a.data_type == desc_b.data_type;
+ are_descriptors_same &= desc_a.shape != nullptr && desc_b.shape != nullptr;
+
+ // Compare shapes only when both pointers are valid, to avoid dereferencing nullptr
+ if(are_descriptors_same)
+ {
+ for(int32_t d = 0; d < desc_a.ndims; ++d)
+ {
+ are_descriptors_same &= desc_a.shape[d] == desc_b.shape[d];
+ }
+ }
+
+ // other attributes should be added here
+
+ return are_descriptors_same;
+ }
+
+public:
+ void setup()
+ {
+ auto err{ acl::StatusCode::Success };
+ auto ctx{ acl::Context(Target, &err) };
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ auto desc{ acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32) };
+ acl::Tensor tensor(ctx, desc, &err);
+
+ auto desc_from_tensor = tensor.get_descriptor();
+
+ ARM_COMPUTE_ASSERT(compare_descriptor(*desc.get(), *desc_from_tensor.get()));
+ ARM_COMPUTE_ASSERT(desc == desc_from_tensor);
+
+ // Test C interface with a "prepopulated" descriptor
+ // Note: When the C interface is used, memory leaks are possible
+ // if members (e.g., shape) are not correctly deleted.
+ // Since that is considered the user's responsibility, it is not tested here.
+ AclTensorDescriptor prepopulated_descriptor
+ {
+ 3, nullptr, AclDataType::AclBFloat16, nullptr, 0
+ };
+
+ ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(tensor.get(), &prepopulated_descriptor) == AclStatus::AclSuccess);
+ ARM_COMPUTE_ASSERT(compare_descriptor(*desc.get(), prepopulated_descriptor));
+ ARM_COMPUTE_ASSERT(desc == acl::TensorDescriptor(prepopulated_descriptor));
+ };
+};
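+
+/* Minimal C-interface round trip matching the check above, using only the
+ * AclGetTensorDescriptor entry point already exercised here; as noted in
+ * the fixture, any members the call fills in (e.g., shape) remain the
+ * caller's responsibility:
+ *
+ *   AclTensorDescriptor c_desc{};
+ *   if(AclGetTensorDescriptor(tensor.get(), &c_desc) == AclStatus::AclSuccess)
+ *   {
+ *       // c_desc.ndims / c_desc.shape / c_desc.data_type now mirror the tensor
+ *   }
+ */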
+
+/** Test case for AclGetTensorDescriptor dealing with invalid arguments
+ *
+ * Test Steps:
+ * - Confirm that a null tensor returns the correct error
+ * - Create a valid tensor
+ * - Confirm that a null descriptor argument returns the correct error
+ */
+template <acl::Target Target>
+class InvalidDescriptorConversionFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ // Null tensor
+ AclTensor null_tensor = nullptr;
+ AclTensorDescriptor desc{};
+ ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(null_tensor, &desc) == AclStatus::AclInvalidArgument);
+
+ // Create valid tensor
+ acl::StatusCode err = acl::StatusCode::Success;
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err);
+
+ // Null descriptor argument
+ ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(tensor.get(), nullptr) == AclStatus::AclInvalidArgument);
+ };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_TENSOR_FIXTURE */
diff --git a/tests/validation/fixtures/UNIT/TensorPackFixture.h b/tests/validation/fixtures/UNIT/TensorPackFixture.h
new file mode 100644
index 0000000000..bc14631936
--- /dev/null
+++ b/tests/validation/fixtures/UNIT/TensorPackFixture.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_UNIT_TENSORPACK_FIXTURE
+#define ARM_COMPUTE_TEST_UNIT_TENSORPACK_FIXTURE
+
+#include "arm_compute/Acl.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Test case for AclCreateTensorPack
+ *
+ * Validate that AclCreateTensorPack behaves as expected with an invalid context
+ *
+ * Test Steps:
+ * - Call AclCreateTensorPack with an invalid context
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor pack is still nullptr
+ */
+class CreateTensorPackWithInvalidContextFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ AclTensorPack pack = nullptr;
+ ARM_COMPUTE_ASSERT(AclCreateTensorPack(&pack, nullptr) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(pack == nullptr);
+ };
+};
+
+/** Test case for AclDestroyTensorPack
+ *
+ * Validate that AclDestroyTensorPack behaves as expected when an invalid tensor pack is given
+ *
+ * Test Steps:
+ * - Call AclDestroyTensorPack with a null tensor pack
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensorPack on an empty array
+ * - Confirm that AclInvalidArgument is reported
+ * - Call AclDestroyTensorPack on an ACL object other than AclTensorPack
+ * - Confirm that AclInvalidArgument is reported
+ * - Confirm that the tensor pack is still nullptr
+ */
+template <acl::Target Target>
+class DestroyInvalidTensorPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+
+ std::array<char, 256> empty_array{};
+ AclTensorPack pack = nullptr;
+
+ ARM_COMPUTE_ASSERT(AclDestroyTensorPack(pack) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensorPack(reinterpret_cast<AclTensorPack>(ctx.get())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(AclDestroyTensorPack(reinterpret_cast<AclTensorPack>(empty_array.data())) == AclStatus::AclInvalidArgument);
+ ARM_COMPUTE_ASSERT(pack == nullptr);
+ };
+};
+
+/** Test case for AclPackTensor
+ *
+ * Validate that AclPackTensor behaves as expected when an invalid object is passed for packing
+ *
+ * Test Steps:
+ * - Create a valid TensorPack
+ * - Try to pack an empty object
+ * - Confirm that AclInvalidArgument is reported
+ * - Try to pack another API object other than tensor
+ * - Confirm that AclInvalidArgument is reported
+ */
+template <acl::Target Target>
+class AddInvalidObjectToTensorPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ auto err = acl::StatusCode::Success;
+
+ acl::Context ctx(Target, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::TensorPack pack(ctx, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ auto status = AclPackTensor(pack.get(),
+ reinterpret_cast<AclTensor>(ctx.get()),
+ AclTensorSlot::AclSrc);
+ ARM_COMPUTE_ASSERT(status == AclStatus::AclInvalidArgument);
+
+ status = AclPackTensor(pack.get(), nullptr, AclTensorSlot::AclSrc);
+ ARM_COMPUTE_ASSERT(status == AclStatus::AclInvalidArgument);
+ };
+};
+
+/** Test case for AclPackTensor
+ *
+ * Validate that a tensor can be added successfully to the TensorPack
+ *
+ * Test Steps:
+ * - Create a valid tensor pack
+ * - Create a valid tensor
+ * - Add tensor to the tensor pack
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class SimpleTensorPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+ acl::TensorPack pack(ctx);
+ acl::Tensor t(ctx, acl::TensorDescriptor({ 3, 3, 5, 7 }, acl::DataType::Float32));
+
+ ARM_COMPUTE_ASSERT(pack.add(t, AclTensorSlot::AclSrc) == acl::StatusCode::Success);
+ };
+};
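+
+/* C-level equivalent of the pack.add() call above, a sketch using the
+ * AclPackTensor entry point exercised by the invalid-argument fixture and
+ * assuming the same objects are in scope:
+ *
+ *   ARM_COMPUTE_ASSERT(AclPackTensor(pack.get(), t.get(), AclTensorSlot::AclSrc) == AclStatus::AclSuccess);
+ */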
+
+/** Test case for AclPackTensor
+ *
+ * Validate that multiple tensors can be added successfully to the TensorPack
+ *
+ * Test Steps:
+ * - Create a valid tensor pack
+ * - Create a list of valid tensors
+ * - Add tensors to the tensor pack
+ * - Confirm that AclSuccess is returned
+ */
+template <acl::Target Target>
+class MultipleTensorsInPackFixture : public framework::Fixture
+{
+public:
+ void setup()
+ {
+ acl::Context ctx(Target);
+ acl::TensorPack pack(ctx);
+
+ const acl::TensorDescriptor desc({ 3, 3, 5, 7 }, acl::DataType::Float32);
+ const size_t num_tensors = 256;
+
+ std::vector<acl::Tensor> tensors;
+ for(unsigned int i = 0; i < num_tensors; ++i)
+ {
+ auto err = acl::StatusCode::Success;
+ tensors.emplace_back(acl::Tensor(ctx, desc, &err));
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ ARM_COMPUTE_ASSERT(pack.add(tensors.back(), static_cast<int32_t>(AclTensorSlot::AclSrcVec) + i) == acl::StatusCode::Success);
+ }
+ };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_UNIT_TENSORPACK_FIXTURE */
diff --git a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
index b17c003342..f5e6071340 100644
--- a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
+++ b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,17 +64,20 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- std::uniform_real_distribution<> distribution(0.5f, 1.f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(0.5f), T(1.0f) };
library->fill(tensor, distribution, i);
}
TensorType compute_target()
{
// Create tensors
- TensorType w1 = create_tensor<TensorType>(TensorShape(180000U, 150U), DataType::F32, 1);
- TensorType b1 = create_tensor<TensorType>(TensorShape(150U), DataType::F32, 1);
- TensorType src = create_tensor<TensorType>(TensorShape(1U, 150U, 1200U, _max_batches), DataType::F32, 1);
- TensorType dst = create_tensor<TensorType>(TensorShape(150U, _max_batches), DataType::F32, 1);
+ TensorType w1 = create_tensor<TensorType>(TensorShape(6000U, 15U), DataType::F32, 1);
+ TensorType b1 = create_tensor<TensorType>(TensorShape(15U), DataType::F32, 1);
+ TensorType src = create_tensor<TensorType>(TensorShape(1U, 15U, 400U, _max_batches), DataType::F32, 1);
+ TensorType dst = create_tensor<TensorType>(TensorShape(15U, _max_batches), DataType::F32, 1);
// Create and configure function
FullyConnectedFunction fc_layer_1;
@@ -102,9 +105,9 @@ protected:
int diff = _max_batches - _cur_batches;
auto new_src_padding = PaddingSize(src_padding.top, src_padding.right, src_padding.bottom + diff, src_padding.left);
auto new_dst_padding = PaddingSize(dst_padding.top, dst_padding.right, dst_padding.bottom + diff, dst_padding.left);
- src.allocator()->info().set_tensor_shape(TensorShape(1U, 150U, 1200U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding);
+ src.allocator()->info().set_tensor_shape(TensorShape(1U, 15U, 400U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding);
src.allocator()->info().set_is_resizable(false);
- dst.allocator()->info().set_tensor_shape(TensorShape(150U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding);
+ dst.allocator()->info().set_tensor_shape(TensorShape(15U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding);
dst.allocator()->info().set_is_resizable(false);
// Configure FC info
@@ -126,16 +129,16 @@ protected:
SimpleTensor<T> compute_reference()
{
// Create reference
- SimpleTensor<T> w1{ TensorShape(180000U, 150U), DataType::F32 };
- SimpleTensor<T> b1{ TensorShape(150U), DataType::F32 };
- SimpleTensor<T> src{ TensorShape(1U, 150U, 1200U, _cur_batches), DataType::F32 };
+ SimpleTensor<T> w1{ TensorShape(6000U, 15U), DataType::F32 };
+ SimpleTensor<T> b1{ TensorShape(15U), DataType::F32 };
+ SimpleTensor<T> src{ TensorShape(1U, 15U, 400U, _cur_batches), DataType::F32 };
// Fill reference
fill(src, 5);
fill(w1, 1);
fill(b1, 2);
- return reference::fully_connected_layer(src, w1, b1, TensorShape(150U, _cur_batches));
+ return reference::fully_connected_layer(src, w1, b1, TensorShape(15U, _cur_batches));
}
protected:
diff --git a/tests/validation/fixtures/UnstackFixture.h b/tests/validation/fixtures/UnstackFixture.h
index f20a128415..30b7dd5539 100644
--- a/tests/validation/fixtures/UnstackFixture.h
+++ b/tests/validation/fixtures/UnstackFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,6 @@ template <typename TensorType, typename ITensorType, typename AccessorType, type
class UnstackValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, int axis, int num, DataType data_type)
{
_target = compute_target(input_shape, axis, num, data_type);
@@ -80,10 +79,10 @@ protected:
for(auto &out : output_slices)
{
out.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!out.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!out.info()->is_resizable());
}
input_tensor.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!input_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!input_tensor.info()->is_resizable());
fill(AccessorType(input_tensor), 0);
// Compute function
unstack.run();
diff --git a/tests/validation/fixtures/UpsampleLayerFixture.h b/tests/validation/fixtures/UpsampleLayerFixture.h
deleted file mode 100644
index e46e19a635..0000000000
--- a/tests/validation/fixtures/UpsampleLayerFixture.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2018-2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/UpsampleLayer.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class UpsampleLayerFixtureBase : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, DataType data_type, DataLayout data_layout,
- Size2D info, const InterpolationPolicy &policy, QuantizationInfo quantization_info)
- {
- _data_type = data_type;
-
- _target = compute_target(input_shape, info, policy, data_type, data_layout, quantization_info);
- _reference = compute_reference(input_shape, info, policy, data_type, quantization_info);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int i)
- {
- library->fill_tensor_uniform(tensor, i);
- }
-
- TensorType compute_target(TensorShape input_shape, const Size2D &info, const InterpolationPolicy &policy,
- DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
- {
- TensorShape output_shape(input_shape);
- output_shape.set(0, info.x() * input_shape[0]);
- output_shape.set(1, info.y() * input_shape[1]);
-
- if(data_layout == DataLayout::NHWC)
- {
- permute(input_shape, PermutationVector(2U, 0U, 1U));
- permute(output_shape, PermutationVector(2U, 0U, 1U));
- }
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
-
- // Create and configure function
- FunctionType upsample;
- upsample.configure(&src, &dst, info, policy);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0);
-
- // Compute DeconvolutionLayer function
- upsample.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const Size2D &info, const InterpolationPolicy &policy,
- DataType data_type, QuantizationInfo quantization_info)
- {
- // Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info };
-
- // Fill reference
- fill(src, 0);
-
- return reference::upsample_layer<T>(src, info, policy);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- DataType _data_type{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class UpsampleLayerFixture : public UpsampleLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, DataType data_type, DataLayout data_layout,
- Size2D info, const InterpolationPolicy &policy)
- {
- UpsampleLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, data_type, data_layout,
- info, policy, QuantizationInfo());
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class UpsampleLayerQuantizedFixture : public UpsampleLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, DataType data_type, DataLayout data_layout,
- Size2D info, const InterpolationPolicy &policy, QuantizationInfo quantization_info)
- {
- UpsampleLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, data_type, data_layout,
- info, policy, quantization_info);
- }
-};
-
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/fixtures/WarpAffineFixture.h b/tests/validation/fixtures/WarpAffineFixture.h
deleted file mode 100644
index 3cbf86f92d..0000000000
--- a/tests/validation/fixtures/WarpAffineFixture.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_WARP_AFFINE_FIXTURE
-#define ARM_COMPUTE_TEST_WARP_AFFINE_FIXTURE
-
-#include <memory>
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Utils.h"
-#include "tests/validation/reference/WarpAffine.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class WarpAffineValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType data_type, InterpolationPolicy policy, BorderMode border_mode)
- {
- // Generate a random constant value if border_mode is constant
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution_u8(0, 255);
- uint8_t constant_border_value = distribution_u8(gen);
-
- // Create the matrix
- std::array<float, 9> matrix{ {} };
- fill_warp_matrix<9>(matrix);
-
- _target = compute_target(shape, data_type, matrix, policy, border_mode, constant_border_value);
- _reference = compute_reference(shape, data_type, matrix, policy, border_mode, constant_border_value);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType warp_affine;
- warp_affine.configure(&src, &dst, matrix, policy, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- warp_affine.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Create the valid mask Tensor
- _valid_mask = SimpleTensor<T>(shape, data_type);
-
- // Fill reference
- fill(src);
-
- return reference::warp_affine<T>(src, _valid_mask, matrix.data(), policy, border_mode, constant_border_value);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- SimpleTensor<T> _valid_mask{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WARP_AFFINE_FIXTURE */
diff --git a/tests/validation/fixtures/WarpPerspectiveFixture.h b/tests/validation/fixtures/WarpPerspectiveFixture.h
deleted file mode 100644
index aa84946e94..0000000000
--- a/tests/validation/fixtures/WarpPerspectiveFixture.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_WARP_PERSPECTIVE_FIXTURE
-#define ARM_COMPUTE_TEST_WARP_PERSPECTIVE_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/Utils.h"
-#include "tests/validation/reference/WarpPerspective.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class WarpPerspectiveValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, DataType data_type, InterpolationPolicy policy, BorderMode border_mode)
- {
- uint8_t constant_border_value = 0;
- // Generate a random constant value if border_mode is constant
- if(border_mode == BorderMode::CONSTANT)
- {
- std::mt19937 gen(library->seed());
- std::uniform_int_distribution<uint8_t> distribution_u8(0, 255);
- constant_border_value = distribution_u8(gen);
- }
-
- // Create the matrix
- std::array<float, 9> matrix = { { 0 } };
- fill_warp_matrix<9>(matrix);
-
- _target = compute_target(input_shape, matrix, policy, border_mode, constant_border_value, data_type);
- _reference = compute_reference(input_shape, matrix, policy, border_mode, constant_border_value, data_type);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
-
- TensorType compute_target(const TensorShape &shape, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
- uint8_t constant_border_value,
- DataType data_type)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
-
- // Create and configure function
- FunctionType warp_perspective;
- warp_perspective.configure(&src, &dst, matrix, policy, border_mode, constant_border_value);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- warp_perspective.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
- uint8_t constant_border_value,
- DataType data_type)
- {
- ARM_COMPUTE_ERROR_ON(data_type != DataType::U8);
-
- // Create reference
- SimpleTensor<T> src{ shape, data_type };
-
- // Create the valid mask Tensor
- _valid_mask = SimpleTensor<T>(shape, data_type);
-
- // Fill reference
- fill(src);
-
- // Compute reference
- return reference::warp_perspective<T>(src, _valid_mask, matrix.data(), policy, border_mode, constant_border_value);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- BorderMode _border_mode{};
- SimpleTensor<T> _valid_mask{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WARP_PERSPECTIVE_FIXTURE */
diff --git a/tests/validation/fixtures/WeightsReshapeFixture.h b/tests/validation/fixtures/WeightsReshapeFixture.h
index 06765f6c21..68bd8b689d 100644
--- a/tests/validation/fixtures/WeightsReshapeFixture.h
+++ b/tests/validation/fixtures/WeightsReshapeFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,10 +45,9 @@ namespace validation
using namespace arm_compute::misc::shape_calculator;
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class WeightsReshapeValidationFixture : public framework::Fixture
+class WeightsReshapeOpValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, DataType data_type, bool has_bias, unsigned int num_groups)
{
const TensorShape output_shape = compute_weights_reshaped_shape(TensorInfo(input_shape, 1, data_type), has_bias, num_groups);
@@ -73,34 +72,44 @@ protected:
// Create and configure function
FunctionType weights_reshape_func;
- weights_reshape_func.configure(&src, (has_bias ? &bias : nullptr), &dst, num_groups);
+ weights_reshape_func.configure(src.info(), (has_bias ? bias.info() : nullptr), dst.info(), num_groups);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0);
if(has_bias)
{
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
bias.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
fill(AccessorType(bias), 1);
}
+ arm_compute::ITensorPack pack =
+ {
+ { arm_compute::TensorType::ACL_SRC, &src },
+ { arm_compute::TensorType::ACL_DST, &dst }
+ };
+
+ if(has_bias)
+ {
+ pack.add_const_tensor(arm_compute::TensorType::ACL_BIAS, &bias);
+ }
// Compute function
- weights_reshape_func.run();
+ weights_reshape_func.run(pack);
return dst;
}
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 9c2df9ef4b..20b678b36c 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -51,125 +51,36 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
-class WinogradConvolutionLayerValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
+class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info)
+ DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
{
ARM_COMPUTE_UNUSED(dilation);
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
}
protected:
- template <typename U>
- void fill(U &&tensor, int i, float min, float max)
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
- switch(tensor.data_type())
- {
- case DataType::F16:
- case DataType::F32:
- {
- std::uniform_real_distribution<> distribution(min, max);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
- }
-
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
-
- // Create and configure function
- FunctionType conv;
- ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
- conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- dst.allocator()->allocate();
- bias.allocator()->allocate();
+ // Compute Convolution function
+ layer.run();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0, -1.f, 1.f);
- fill(AccessorType(weights), 1, -1.f, 1.f);
- fill(AccessorType(bias), 2, -1.f, 1.f);
-
- // Compute Winograd Convolution function
- conv.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
- {
- // Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1 };
- SimpleTensor<T> weights{ weights_shape, data_type, 1 };
- SimpleTensor<T> bias{ bias_shape, data_type, 1 };
-
- // Fill reference
- fill(src, 0, -1.f, 1.f);
- fill(weights, 1, -1.f, 1.f);
- if(use_bias)
- {
- fill(bias, 2, -1.f, 1.f);
- }
- else
- {
- fill(bias, 2, 0.f, 0.f);
- }
-
- SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
-
- return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
+ // Reinstating original data layout for the test suite to properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
}
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
-class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
-
- {
- ARM_COMPUTE_UNUSED(dilation);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
- }
-
-protected:
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -177,13 +88,13 @@ protected:
{
case DataType::F16:
{
- arm_compute::utils::uniform_real_distribution_fp16 distribution((half)min, (half)max);
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
library->fill(tensor, distribution, i);
break;
}
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(min, max);
+ std::uniform_real_distribution<float> distribution(min, max);
library->fill(tensor, distribution, i);
break;
}
@@ -216,10 +127,12 @@ protected:
framework::LogLevel::ERRORS);
conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &src, &weights, &bias, &dst }, data_layout);
// Allocate tensors
src.allocator()->allocate();
@@ -227,19 +140,25 @@ protected:
dst.allocator()->allocate();
bias.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0, -0.5f, 0.5f);
fill(AccessorType(weights), 1, -0.5f, 0.5f);
fill(AccessorType(bias), 2, -0.5f, 0.5f);
- // Compute Winograd Convolution function
- conv.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute function
+ conv.run();
+ }
return dst;
}
@@ -310,37 +229,59 @@ protected:
SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info);
SimpleTensor<T1> batched_gemm = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
SimpleTensor<T1> conv_out = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
- SimpleTensor<T> conv_out_t(std::move(copy_tensor<T, T1>(conv_out)));
+ SimpleTensor<T> conv_out_t(copy_tensor<T, T1>(conv_out));
return (act_info.enabled()) ? reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t;
}
TensorType _target{};
SimpleTensor<T> _reference{};
+ bool _mixed_layout{ false };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
-
- _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstating original data layout for the test suite to properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
switch(tensor.data_type())
{
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(min, max);
+ std::uniform_real_distribution<float> distribution(min, max);
library->fill(tensor, distribution, i);
break;
}
@@ -365,22 +306,30 @@ protected:
FunctionType transf;
transf.configure(&src, &dst, winograd_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &src, &dst }, data_layout);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- // Compute Winograd input transform function
- transf.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(transf, src, dst);
+ }
+ else
+ {
+ // Compute Winograd input transform function
+ transf.run();
+ }
return dst;
}
@@ -395,34 +344,57 @@ protected:
return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout{ false };
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
{
WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
- _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Run the function with the swapped data layouts
+ layer.run();
+
+ // Restore the original data layouts so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
switch(tensor.data_type())
{
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(min, max);
+ std::uniform_real_distribution<float> distribution(min, max);
library->fill(tensor, distribution, i);
break;
}
@@ -448,21 +420,30 @@ protected:
FunctionType filter_transform;
filter_transform.configure(&src, &dst, winograd_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &src, &dst }, data_layout);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- filter_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(filter_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd filter transform function
+ filter_transform.run();
+ }
return dst;
}
@@ -477,15 +458,15 @@ protected:
return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout{ false };
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
{
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, winograd_info, data_type, act_info);
@@ -493,15 +474,37 @@ public:
}
protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Run the function with the swapped data layouts
+ layer.run();
+
+ // Restore the original data layouts so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
switch(tensor.data_type())
{
case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F32:
{
- std::uniform_real_distribution<> distribution(min, max);
+ std::uniform_real_distribution<float> distribution(min, max);
library->fill(tensor, distribution, i);
break;
}
@@ -525,25 +528,34 @@ protected:
FunctionType output_transform;
output_transform.configure(&src, &bias, &dst, winograd_info, act_info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout);
// Allocate tensors
src.allocator()->allocate();
bias.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
fill(AccessorType(bias), 1, -1.f, 1.f);
- output_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(output_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd output transform function
+ output_transform.run();
+ }
return dst;
}
@@ -565,10 +577,11 @@ protected:
return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
}
+ bool _mixed_layout{ false };
TensorType _target{};
SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H
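The mixed-layout additions above all follow one pattern: configure once, flip the tensors' data layout, run, then restore. A minimal standalone sketch of that pattern, assuming an already-configured ACL function on `src`/`dst`; the helper name is illustrative, not part of the library:

    // Sketch of the mixed-layout test pattern, assuming `layer` was already
    // configured on `src`/`dst`. Helper name is illustrative.
    template <typename FunctionT, typename TensorT>
    void run_with_swapped_layouts(FunctionT &layer, TensorT &src, TensorT &dst)
    {
        using arm_compute::DataLayout;
        const DataLayout src_layout = src.info()->data_layout();
        const DataLayout dst_layout = dst.info()->data_layout();

        // Swap the layouts after configure() to exercise multi-DataLayout graph cases
        src.info()->set_data_layout(src_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(dst_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        layer.run();

        // Restore the original layouts so validation checks the expected metadata
        src.info()->set_data_layout(src_layout);
        dst.info()->set_data_layout(dst_layout);
    }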
diff --git a/tests/validation/fixtures/YOLOLayerFixture.h b/tests/validation/fixtures/YOLOLayerFixture.h
deleted file mode 100644
index a3842e1e8a..0000000000
--- a/tests/validation/fixtures/YOLOLayerFixture.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_YOLO_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_YOLO_LAYER_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/YOLOLayer.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class YOLOValidationGenericFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, int32_t num_classes, DataLayout data_layout, DataType data_type,
- QuantizationInfo quantization_info)
- {
- _data_type = data_type;
- _function = function;
-
- ActivationLayerInfo info(function, alpha_beta, alpha_beta);
-
- _target = compute_target(shape, in_place, info, num_classes, data_layout, data_type, quantization_info);
- _reference = compute_reference(shape, info, num_classes, data_type, quantization_info);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor)
- {
- float min_bound = 0;
- float max_bound = 0;
- std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
- std::uniform_real_distribution<> distribution(min_bound, max_bound);
- library->fill(tensor, distribution, 0);
- }
-
- TensorType compute_target(TensorShape shape, bool in_place, const ActivationLayerInfo &info, int32_t num_classes, DataLayout data_layout, DataType data_type, QuantizationInfo quantization_info)
- {
- if(data_layout == DataLayout::NHWC)
- {
- permute(shape, PermutationVector(2U, 0U, 1U));
- }
-
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info, data_layout);
- TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info, data_layout);
-
- // Create and configure function
- FunctionType yolo_layer;
-
- TensorType *dst_ptr = in_place ? &src : &dst;
-
- yolo_layer.configure(&src, dst_ptr, info, num_classes);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- if(!in_place)
- {
- dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- }
-
- // Fill tensors
- fill(AccessorType(src));
-
- // Compute function
- yolo_layer.run();
-
- if(in_place)
- {
- return src;
- }
- else
- {
- return dst;
- }
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, const ActivationLayerInfo &info, int32_t num_classes, DataType data_type, QuantizationInfo quantization_info)
- {
- // Create reference
- SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
-
- // Fill reference
- fill(src);
-
- return reference::yolo_layer<T>(src, info, num_classes);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- DataType _data_type{};
- ActivationLayerInfo::ActivationFunction _function{};
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class YOLOValidationFixture : public YOLOValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, int32_t num_classes, DataLayout data_layout, DataType data_type)
- {
- YOLOValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, num_classes, data_layout, data_type, QuantizationInfo());
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class YOLOValidationQuantizedFixture : public YOLOValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, int32_t num_classes, DataLayout data_layout, DataType data_type,
- QuantizationInfo quantization_info)
- {
- YOLOValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, num_classes, data_layout, data_type, quantization_info);
- }
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif // ARM_COMPUTE_TEST_YOLO_LAYER_FIXTURE
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h
new file mode 100644
index 0000000000..ca4de11a15
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DEPTHWISECONV2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DEPTHWISECONV2DFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/reference/DepthwiseConvolutionLayer.h"
+#include "tests/validation/Validation.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuDepthwiseConv2dValidationGenericFixture : public framework::Fixture
+{
+public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value ||
+ std::is_same<typename std::decay<T>::type, int8_t>::value,
+ int32_t,
+ T>::type; // If T: uint8_t or int8_t then TBias: int32_t, otherwise TBias: T
+
+ void setup(TensorShape input_shape,
+ Size2D kernel_size,
+ const PadStrideInfo &pad_stride,
+ const Size2D &dilation,
+ const unsigned int depth_multiplier,
+ const DataType data_type,
+ const DataLayout data_layout)
+ {
+ ARM_COMPUTE_ERROR_ON(data_layout !=
+ DataLayout::NHWC); // Dynamic fusion depthwise conv2d only supports NHWC layout
+
+ DepthwiseConv2dAttributes dwc_conv2d_attr;
+ const Padding2D padding_2d(pad_stride.pad_left(), pad_stride.pad_right(), pad_stride.pad_top(),
+ pad_stride.pad_bottom());
+ dwc_conv2d_attr.pad(padding_2d)
+ .stride(Size2D(pad_stride.stride().first, pad_stride.stride().second))
+ .dilation(dilation)
+ .depth_multiplier(depth_multiplier)
+ .dimension_rounding_type(pad_stride.round());
+
+ // Calculate Output and Weight Shapes
+ TensorShape weights_shape = TensorShape(kernel_size.width, kernel_size.height);
+
+ const TensorInfo in_info(input_shape, 1, data_type);
+ const TensorInfo we_info(weights_shape, 1, data_type);
+
+ const ConvolutionInfo info{pad_stride, depth_multiplier, ActivationLayerInfo(), dilation};
+ const TensorShape output_shape =
+ misc::shape_calculator::compute_depthwise_convolution_shape(in_info, we_info, info);
+
+ weights_shape.set(2, output_shape.z());
+ const TensorShape bias_shape = TensorShape(weights_shape[2]);
+
+ _data_type = data_type;
+ _data_layout = data_layout;
+ _target = compute_target(input_shape, weights_shape, bias_shape, dwc_conv2d_attr);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, dwc_conv2d_attr);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ // The given input shape is in NCHW format
+ TensorType compute_target(TensorShape input_shape,
+ TensorShape weights_shape,
+ const TensorShape &bias_shape,
+ const DepthwiseConv2dAttributes dwc_conv2d_attr)
+ {
+ ARM_COMPUTE_ERROR_ON(_data_layout != DataLayout::NHWC);
+
+ // Our test shapes are assumed to be in NCHW data layout, hence the permutation to NHWC
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ ITensorInfo *input_info = context.create_tensor_info(TensorInfo(input_shape, 1, _data_type, _data_layout));
+ ITensorInfo *weight_info = context.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout));
+ ITensorInfo *bias_info = context.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout));
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, weight_info, bias_info, dwc_conv2d_attr);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_input{};
+ TensorType t_weight{};
+ TensorType t_bias{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_input.allocator()->init(*input_info);
+ t_weight.allocator()->init(*weight_info);
+ t_bias.allocator()->init(*bias_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_input.allocator()->allocate();
+ t_weight.allocator()->allocate();
+ t_bias.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_input), 0);
+ fill(AccessorType(t_weight), 1);
+ fill(AccessorType(t_bias), 2);
+
+ // Run runtime
+ runtime.run({&t_input, &t_weight, &t_bias, &t_dst});
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape,
+ const TensorShape &weights_shape,
+ const TensorShape &bias_shape,
+ const TensorShape &output_shape,
+ DepthwiseConv2dAttributes dwc_conv2d_attr)
+ {
+ // Create reference
+ SimpleTensor<T> src{input_shape, _data_type, 1};
+ SimpleTensor<T> weight{weights_shape, _data_type, 1};
+ SimpleTensor<TBias> bias{bias_shape, _data_type, 1};
+
+ fill(src, 0);
+ fill(weight, 1);
+ fill(bias, 2);
+
+ auto src_nchw = src;
+ auto weights_nchw = weight;
+ auto bias_nchw = bias;
+ auto output_shape_nchw = output_shape;
+
+ PadStrideInfo legacy_pad_stride(dwc_conv2d_attr.stride().x(), dwc_conv2d_attr.stride().y(),
+ dwc_conv2d_attr.pad().left, dwc_conv2d_attr.pad().right,
+ dwc_conv2d_attr.pad().top, dwc_conv2d_attr.pad().bottom,
+ DimensionRoundingType{});
+ auto dst_nchw =
+ reference::depthwise_convolution(src_nchw, weights_nchw, bias_nchw, output_shape_nchw, legacy_pad_stride,
+ dwc_conv2d_attr.depth_multiplier(), dwc_conv2d_attr.dilation());
+ return dst_nchw;
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuDepthwiseConv2dValidationFixture
+ : public DynamicFusionGpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape,
+ Size2D kernel_size,
+ const PadStrideInfo &info,
+ const Size2D &dilation,
+ const unsigned int depth_multiplier,
+ DataType data_type,
+ DataLayout data_layout)
+ {
+ DynamicFusionGpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ input_shape, kernel_size, info, dilation, depth_multiplier, data_type, data_layout);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DEPTHWISECONV2DFIXTURE_H
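Outside the fixture plumbing, the dynamic fusion flow exercised above reduces to: build a workload sketch, compose operators on it, compile it into a runtime, back any auxiliary tensors with memory, then bind user tensors and run. A condensed sketch of those steps, with illustrative NHWC shapes (C, W, H ordering) and default attributes; error handling is omitted, and it uses only headers this fixture already includes:

    // Condensed sketch of the dynamic fusion sketch-and-run flow, with
    // illustrative shapes and default attributes (not a definitive recipe).
    void run_depthwise_sketch_example()
    {
        using namespace arm_compute;
        using namespace arm_compute::experimental::dynamic_fusion;

        auto               cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        GpuWorkloadContext context{ &cl_compile_ctx };
        GpuWorkloadSketch  sketch{ &context };

        // 1. Declare sketch tensors; the destination info is deduced by the operator
        ITensorInfo *src = context.create_tensor_info(TensorInfo(TensorShape(16U, 32U, 32U), 1, DataType::F32, DataLayout::NHWC));
        ITensorInfo *wei = context.create_tensor_info(TensorInfo(TensorShape(16U, 3U, 3U), 1, DataType::F32, DataLayout::NHWC));
        ITensorInfo *bia = context.create_tensor_info(TensorInfo(TensorShape(16U), 1, DataType::F32, DataLayout::NHWC));
        ITensorInfo *dst = context.create_tensor_info();

        // 2. Compose operators on the sketch, then mark the output
        ITensorInfo *ans = GpuDepthwiseConv2d::create_op(sketch, src, wei, bia, DepthwiseConv2dAttributes{});
        GpuOutput::create_op(sketch, ans, dst);

        // 3. Compile the sketch into a runnable workload
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);

        // 4. Back auxiliary tensors with memory before running
        for(auto &data : runtime.get_auxiliary_tensors())
        {
            std::get<0>(data)->allocator()->init(std::get<1>(data), std::get<2>(data).alignment);
            std::get<0>(data)->allocator()->allocate();
        }

        // 5. Bind user tensors and run
        CLTensor t_src{}, t_wei{}, t_bia{}, t_dst{};
        t_src.allocator()->init(*src);
        t_wei.allocator()->init(*wei);
        t_bia.allocator()->init(*bia);
        t_dst.allocator()->init(*dst);
        t_src.allocator()->allocate();
        t_wei.allocator()->allocate();
        t_bia.allocator()->allocate();
        t_dst.allocator()->allocate();
        runtime.run({ &t_src, &t_wei, &t_bia, &t_dst });
    }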
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
new file mode 100644
index 0000000000..1f4e223b93
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DIRECTCONV2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DIRECTCONV2DFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/Conv2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/Validation.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+template <typename U>
+void fill(U &&tensor, int i)
+{
+ switch (tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+}
+
+} // namespace
+
+/** General Conv2d fixture
+ * Adapted from tests/validation/fixtures/ConvolutionLayerFixture.h
+ * TODO: Parameterize to be fully backend agnostic: COMPMID-5760; remove Gpu from name
+ */
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuConv2dValidationGenericFixture : public framework::Fixture
+{
+public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value ||
+ std::is_same<typename std::decay<T>::type, int8_t>::value,
+ int32_t,
+ T>::type; // If T: uint8_t or int8_t then TBias: int32_t, otherwise TBias: T
+
+ void setup(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ const PadStrideInfo &info,
+ const Size2D &dilation,
+ DataType data_type,
+ DataLayout data_layout,
+ QuantizationInfo quantization_info,
+ QuantizationInfo weight_quantization_info)
+ {
+ ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion conv2d only supports NHWC layout
+ const Conv2dAttributes conv2d_attr = convert_pad_stride_info_to_conv_attr(info, dilation);
+ _data_type = data_type;
+ _data_layout = data_layout;
+ _is_quantized = is_data_type_quantized_asymmetric(data_type);
+ _quantization_info = quantization_info;
+ _weight_quantization_info = weight_quantization_info;
+ _bias_data_type = _is_quantized ? DataType::S32 : data_type;
+ _target = compute_target(input_shape, weights_shape, bias_shape, conv2d_attr);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, conv2d_attr);
+ }
+
+protected:
+ // The given input shape is in NCHW format
+ TensorType compute_target(TensorShape input_shape,
+ TensorShape weights_shape,
+ const TensorShape &bias_shape,
+ Conv2dAttributes conv2d_attr)
+ {
+ ARM_COMPUTE_ERROR_ON(_data_layout != DataLayout::NHWC);
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ CLScheduler::get().default_reinit();
+
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ ITensorInfo *input_info = context.create_tensor_info(TensorInfo(input_shape, 1, _data_type, _data_layout));
+ ITensorInfo *weight_info = context.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout));
+ ITensorInfo *bias_info = context.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout));
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, weight_info, bias_info, conv2d_attr);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+ // Construct user tensors
+ TensorType t_input{};
+ TensorType t_weight{};
+ TensorType t_bias{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_input.allocator()->init(*input_info);
+ t_weight.allocator()->init(*weight_info);
+ t_bias.allocator()->init(*bias_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_input.allocator()->allocate();
+ t_weight.allocator()->allocate();
+ t_bias.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_input), 0);
+ fill(AccessorType(t_weight), 1);
+ fill(AccessorType(t_bias), 2);
+
+ // Run runtime
+ runtime.run({&t_input, &t_weight, &t_bias, &t_dst});
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape,
+ const TensorShape &weights_shape,
+ const TensorShape &bias_shape,
+ const TensorShape &output_shape,
+ Conv2dAttributes conv2d_attr)
+ {
+ // Create reference
+ SimpleTensor<T> src{input_shape, _data_type, 1, _quantization_info};
+ SimpleTensor<T> weight{weights_shape, _data_type, 1, _weight_quantization_info};
+ SimpleTensor<TBias> bias{bias_shape, _data_type, 1, _quantization_info};
+
+ fill(src, 0);
+ fill(weight, 1);
+ fill(bias, 2);
+
+ auto src_nchw = src;
+ auto weights_nchw = weight;
+ auto bias_nchw = bias;
+ auto output_shape_nchw = output_shape;
+
+ PadStrideInfo legacy_pad_stride(conv2d_attr.stride().x(), conv2d_attr.stride().y(), conv2d_attr.pad().left,
+ conv2d_attr.pad().right, conv2d_attr.pad().top, conv2d_attr.pad().bottom,
+ DimensionRoundingType{});
+ auto dst_nchw = reference::convolution_layer(src_nchw, weights_nchw, bias_nchw, output_shape_nchw,
+ legacy_pad_stride, conv2d_attr.dilation());
+ return dst_nchw;
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataType _bias_data_type{};
+ DataLayout _data_layout{};
+ QuantizationInfo _quantization_info{};
+ QuantizationInfo _weight_quantization_info{};
+ bool _is_quantized = false;
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuConv2dValidationFixture
+ : public DynamicFusionGpuConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ const PadStrideInfo &info,
+ const Size2D &dilation,
+ DataType data_type,
+ DataLayout data_layout,
+ QuantizationInfo quantization_info)
+ {
+ DynamicFusionGpuConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, data_layout,
+ quantization_info, quantization_info);
+ }
+};
+
+/** Specific Conv2d method: Direct Conv2d fixture
+ * Adapted from tests/validation/fixtures/DirectConvolutionLayerFixture.h
+ * TODO: Parameterize to be fully backend agnostic: COMPMID-5760
+ */
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionDirectConv2dValidationGenericFixture : public framework::Fixture
+{
+public:
+ using TBias =
+ typename std::conditional<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T>::type;
+
+ void setup(TensorShape input_shape,
+ int stride_x,
+ int stride_y,
+ int pad_x,
+ int pad_y,
+ unsigned int kernel_size,
+ unsigned int num_kernels,
+ DataType data_type,
+ QuantizationInfo quantization_info,
+ DataLayout data_layout)
+ {
+ ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion conv2d only supports NHWC layout
+
+ TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
+ const TensorShape bias_shape(num_kernels);
+ const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
+ const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+
+ const Conv2dAttributes conv2d_attr = convert_pad_stride_info_to_conv_attr(info, {1U, 1U} /* dilation */);
+
+ TensorInfo input_info = TensorInfo(input_shape, 1, data_type);
+ TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);
+
+ const TensorShape output_shape =
+ misc::shape_calculator::compute_deep_convolution_shape(input_info, weights_info, info);
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, conv2d_attr, data_type,
+ bias_data_type, quantization_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type,
+ bias_data_type, quantization_info);
+ }
+
+protected:
+ TensorType compute_target(TensorShape input_shape,
+ TensorShape weights_shape,
+ const TensorShape &bias_shape,
+ TensorShape output_shape,
+ const Conv2dAttributes &conv2d_attr,
+ DataType data_type,
+ DataType bias_data_type,
+ QuantizationInfo quantization_info,
+ const DataLayout &data_layout)
+ {
+ ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC);
+ ARM_COMPUTE_UNUSED(quantization_info);
+ // Dataset shapes are in NCHW layout
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ auto input_info = context.create_tensor_info(TensorInfo(input_shape, 1, data_type, data_layout));
+ auto weight_info = context.create_tensor_info(TensorInfo(weights_shape, 1, data_type, data_layout));
+ auto bias_info = context.create_tensor_info(TensorInfo(bias_shape, 1, bias_data_type, data_layout));
+ auto dst_info = context.create_tensor_info();
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, weight_info, bias_info, conv2d_attr);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+ // Construct user tensors
+ TensorType t_input{};
+ TensorType t_weight{};
+ TensorType t_bias{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_input.allocator()->init(*input_info);
+ t_weight.allocator()->init(*weight_info);
+ t_bias.allocator()->init(*bias_info);
+ t_dst.allocator()->init(*dst_info);
+
+ ARM_COMPUTE_ASSERT(t_input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t_weight.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t_dst.info()->is_resizable());
+
+ // Allocate and fill user tensors
+ t_input.allocator()->allocate();
+ t_weight.allocator()->allocate();
+ t_bias.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!t_input.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!t_weight.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!t_bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!t_dst.info()->is_resizable());
+
+ fill(AccessorType(t_input), 0);
+ fill(AccessorType(t_weight), 1);
+ fill(AccessorType(t_bias), 2);
+
+ // Run runtime
+ runtime.run({&t_input, &t_weight, &t_bias, &t_dst});
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape,
+ const TensorShape &weights_shape,
+ const TensorShape &bias_shape,
+ const TensorShape &output_shape,
+ const PadStrideInfo &info,
+ DataType data_type,
+ DataType bias_data_type,
+ QuantizationInfo quantization_info)
+ {
+ // Create reference
+ SimpleTensor<T> src{input_shape, data_type, 1, quantization_info};
+ SimpleTensor<T> weights{weights_shape, data_type, 1, quantization_info};
+ SimpleTensor<TBias> bias{bias_shape, bias_data_type, 1, quantization_info};
+
+ // Fill reference
+ fill(src, 0);
+ fill(weights, 1);
+ fill(bias, 2);
+
+ SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
+ return dst;
+ }
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionDirectConv2dValidationFixture
+ : public DynamicFusionDirectConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape,
+ int stride_x,
+ int stride_y,
+ int pad_x,
+ int pad_y,
+ unsigned int kernel_size,
+ unsigned int num_kernels,
+ DataType data_type,
+ DataLayout data_layout)
+ {
+ DynamicFusionDirectConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
+ data_layout);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DIRECTCONV2DFIXTURE_H
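Both conv2d fixtures above translate between the legacy PadStrideInfo and the dynamic fusion Conv2dAttributes: forwards (via the library helper convert_pad_stride_info_to_conv_attr) when building the sketch, and backwards when driving the NCHW reference. A hand-rolled sketch of the two mappings, assuming Conv2dAttributes exposes the same pad()/stride()/dilation() builder setters DepthwiseConv2dAttributes is shown using earlier; note the dimension rounding type is not preserved across the round trip:

    // Sketch of the PadStrideInfo <-> Conv2dAttributes mapping the fixtures
    // rely on; hand-rolled for illustration only.
    Conv2dAttributes to_conv2d_attributes(const PadStrideInfo &info, const Size2D &dilation)
    {
        Conv2dAttributes attr{};
        attr.pad(Padding2D(info.pad_left(), info.pad_right(), info.pad_top(), info.pad_bottom()))
            .stride(Size2D(info.stride().first, info.stride().second))
            .dilation(dilation);
        return attr;
    }

    PadStrideInfo to_legacy_pad_stride(const Conv2dAttributes &attr)
    {
        // DimensionRoundingType is defaulted, as in compute_reference above
        return PadStrideInfo(attr.stride().x(), attr.stride().y(), attr.pad().left, attr.pad().right,
                             attr.pad().top, attr.pad().bottom, DimensionRoundingType{});
    }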
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h
new file mode 100644
index 0000000000..69bd0efbdc
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_ELEMENTWISEBINARYFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_ELEMENTWISEBINARYFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/reference/ElementwiseOperations.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuElementwiseBinaryValidationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(ArithmeticOperation ref_op,
+ const TensorShape &shape0,
+ const TensorShape &shape1,
+ const TensorShape &shape2,
+ DataType data_type,
+ bool is_inplace,
+ bool fuse_two_ops = false)
+ {
+ _ref_op = ref_op;
+ _is_inplace = is_inplace;
+ _data_type = data_type;
+ _fuse = fuse_two_ops;
+ ARM_COMPUTE_ERROR_ON_MSG(_fuse && shape2.total_size() == 0, "No shape2 provided for fusion of two ops.");
+ ARM_COMPUTE_ERROR_ON_MSG(_fuse && _is_inplace, "In-place computation is not yet supported for the fused case.");
+ _target = compute_target(shape0, shape1, shape2);
+ _reference = compute_reference(shape0, shape1, shape2);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ if (is_data_type_float(tensor.data_type()))
+ {
+ switch (_ref_op)
+ {
+ case ArithmeticOperation::DIV:
+ library->fill_tensor_uniform_ranged(tensor, i, {std::pair<float, float>(-0.001f, 0.001f)});
+ break;
+ case ArithmeticOperation::POWER:
+ library->fill_tensor_uniform(tensor, i, 0.0f, 5.0f);
+ break;
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+ else if (tensor.data_type() == DataType::S32)
+ {
+ switch (_ref_op)
+ {
+ case ArithmeticOperation::DIV:
+ library->fill_tensor_uniform_ranged(tensor, i, {std::pair<int32_t, int32_t>(-1, 1)});
+ break;
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2)
+ {
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Fuse first element wise binary Op
+ ITensorInfo *lhs_info = context.create_tensor_info(TensorInfo(shape0, 1, _data_type));
+ ITensorInfo *rhs_info = context.create_tensor_info(TensorInfo(shape1, 1, _data_type));
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ ITensorInfo *rhs_info_fuse = nullptr;
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, lhs_info, rhs_info);
+
+ if (_fuse)
+ {
+ rhs_info_fuse = context.create_tensor_info(TensorInfo(shape2, 1, _data_type));
+ ITensorInfo *ans2_info = FunctionType::create_op(sketch, ans_info, rhs_info_fuse);
+ GpuOutput::create_op(sketch, ans2_info, dst_info);
+ }
+ else
+ {
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+ }
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_lhs{};
+ TensorType t_rhs{};
+ TensorType t_rhs_fuse{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_lhs.allocator()->init(*lhs_info);
+ t_rhs.allocator()->init(*rhs_info);
+ t_dst.allocator()->init(*dst_info);
+ if (_fuse)
+ {
+ t_rhs_fuse.allocator()->init(*rhs_info_fuse);
+ }
+
+ // Allocate and fill user tensors
+ // Instead of using ACL allocator, the user can choose to import memory into the tensors
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_dst.allocator()->allocate();
+ if (_fuse)
+ {
+ t_rhs_fuse.allocator()->allocate();
+ }
+
+ fill(AccessorType(t_lhs), 0);
+ fill(AccessorType(t_rhs), 1);
+ if (_fuse)
+ {
+ fill(AccessorType(t_rhs_fuse), 2);
+ }
+
+ // Run runtime
+ if (_fuse)
+ {
+ runtime.run({&t_lhs, &t_rhs, &t_rhs_fuse, &t_dst});
+ }
+ else
+ {
+ runtime.run({&t_lhs, &t_rhs, &t_dst});
+ }
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2)
+ {
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ const TensorShape out_shape_fuse = TensorShape::broadcast_shape(out_shape, shape2);
+
+ // Create reference
+ SimpleTensor<T> ref_lhs{shape0, _data_type, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_rhs{shape1, _data_type, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_rhs_fuse{shape2, _data_type, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_dst{out_shape, _data_type, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_dst_fuse{out_shape_fuse, _data_type, 1, QuantizationInfo()};
+
+ // Fill reference
+ fill(ref_lhs, 0);
+ fill(ref_rhs, 1);
+
+ reference::arithmetic_operation<T>(_ref_op, ref_lhs, ref_rhs, ref_dst, ConvertPolicy::WRAP);
+ if (_fuse)
+ {
+ fill(ref_rhs_fuse, 2);
+ reference::arithmetic_operation<T>(_ref_op, ref_dst, ref_rhs_fuse, ref_dst_fuse, ConvertPolicy::WRAP);
+ }
+ return _fuse ? ref_dst_fuse : ref_dst;
+ }
+
+ ArithmeticOperation _ref_op{ArithmeticOperation::ADD};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ bool _is_inplace{false};
+ bool _fuse{false};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuElementwiseBinaryOneOpValidationFixture
+ : public DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(ArithmeticOperation ref_op, const TensorShape &shape0, DataType data_type, bool is_inplace)
+ {
+ DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ ref_op, shape0, shape0, TensorShape(), data_type, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuElementwiseBinaryBroadcastOneOpValidationFixture
+ : public DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(ArithmeticOperation ref_op,
+ const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type,
+ bool is_inplace)
+ {
+ DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ ref_op, shape0, shape1, TensorShape(), data_type, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuElementwiseBinaryTwoOpsValidationFixture
+ : public DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(ArithmeticOperation ref_op,
+ const TensorShape &shape0,
+ const TensorShape &shape1,
+ const TensorShape &shape2,
+ DataType data_type,
+ bool is_inplace,
+ bool fuse_two_ops)
+ {
+ DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ ref_op, shape0, shape1, shape2, data_type, is_inplace, fuse_two_ops);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_ELEMENTWISEBINARYFIXTURE_H
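The two-op path above validates against a chained reference: the first binary op broadcasts shape0 with shape1, and the fused op broadcasts that intermediate with shape2. A minimal sketch of the chain, assuming F32 data and ADD for both operators (the fixture parameterizes the op and the data type):

    // Sketch of the fused two-op reference computation, assuming F32 ADD for
    // both operators; mirrors compute_reference above.
    SimpleTensor<float> fused_add_reference(const SimpleTensor<float> &lhs,
                                            const SimpleTensor<float> &rhs,
                                            const SimpleTensor<float> &rhs_fuse)
    {
        const TensorShape mid_shape = TensorShape::broadcast_shape(lhs.shape(), rhs.shape());
        const TensorShape out_shape = TensorShape::broadcast_shape(mid_shape, rhs_fuse.shape());

        SimpleTensor<float> mid{ mid_shape, DataType::F32 };
        SimpleTensor<float> out{ out_shape, DataType::F32 };

        // First op: mid = lhs + rhs
        reference::arithmetic_operation<float>(ArithmeticOperation::ADD, lhs, rhs, mid, ConvertPolicy::WRAP);
        // Second, fused op: out = mid + rhs_fuse
        reference::arithmetic_operation<float>(ArithmeticOperation::ADD, mid, rhs_fuse, out, ConvertPolicy::WRAP);
        return out;
    }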
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h
new file mode 100644
index 0000000000..4c1cc94d3d
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_MATMULKERNELFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_MATMULKERNELFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/MatMulAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMatMul.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/GEMM.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/reference/ReshapeLayer.h"
+#include "tests/validation/Validation.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+template <typename U>
+void fill(U &&tensor, int i)
+{
+ switch (tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+}
+
+} // namespace
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuMatMulValidationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape lhs_shape,
+ TensorShape rhs_shape,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ int M0,
+ int N0,
+ int K0,
+ bool export_rhs_to_cl_image,
+ DataType data_type)
+ {
+ // For brevity, the input shapes are assumed to be non-transposed for both the A and B matrices.
+ if (transpose_a)
+ {
+ permute(lhs_shape, PermutationVector(1U, 0U));
+ }
+ if (transpose_b)
+ {
+ permute(rhs_shape, PermutationVector(1U, 0U));
+ }
+
+ // Skip configurations unsupported by the device.
+ _device_supports_export_to_cl_image = image2d_from_buffer_supported(CLKernelLibrary::get().get_device());
+ if (!_device_supports_export_to_cl_image && export_rhs_to_cl_image)
+ {
+ ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
+ framework::ARM_COMPUTE_PRINT_INFO();
+ return; // Note: Also need to skip the validate in corresponding FIXTURE_DATA_TEST_CASEs.
+ }
+
+ _target = compute_target(lhs_shape, rhs_shape, transpose_a, transpose_b, M0, N0, K0, export_rhs_to_cl_image,
+ data_type);
+ _reference = compute_reference(lhs_shape, rhs_shape, output_shape, transpose_a, transpose_b, data_type);
+ }
+
+protected:
+ TensorType compute_target(TensorShape &shape_a,
+ TensorShape &shape_b,
+ bool transpose_a,
+ bool transpose_b,
+ int M0,
+ int N0,
+ int K0,
+ bool export_rhs_to_cl_image,
+ DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(export_rhs_to_cl_image);
+ CLScheduler::get().default_reinit();
+
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ ITensorInfo *lhs_info = context.create_tensor_info(TensorInfo(shape_a, 1, data_type));
+ ITensorInfo *rhs_info = context.create_tensor_info(TensorInfo(shape_b, 1, data_type));
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ MatMulAttributes matmul_attr{};
+ matmul_attr.adj_lhs(transpose_a);
+ matmul_attr.adj_rhs(transpose_b);
+
+ GpuMatMulSettings matmul_settings{};
+ matmul_settings.m0(M0);
+ matmul_settings.n0(N0);
+ matmul_settings.k0(K0);
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, lhs_info, rhs_info, matmul_attr, matmul_settings);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_lhs{};
+ TensorType t_rhs{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_lhs.allocator()->init(*lhs_info);
+ t_rhs.allocator()->init(*rhs_info);
+ t_dst.allocator()->init(*dst_info);
+
+ ARM_COMPUTE_ASSERT(t_lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t_rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(t_dst.info()->is_resizable());
+
+ // Allocate and fill user tensors
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!t_lhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!t_rhs.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!t_dst.info()->is_resizable());
+
+ fill(AccessorType(t_lhs), 0);
+ fill(AccessorType(t_rhs), 1);
+
+ // Run runtime
+ runtime.run({&t_lhs, &t_rhs, &t_dst});
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &output_shape,
+ bool pretranspose_a,
+ bool pretranspose_b,
+ DataType data_type)
+ {
+ // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 3D
+ // This is necessary unless we choose to extend gemm reference for 5D+ tensors
+ TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ);
+ TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ);
+ TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimZ);
+
+ // Create reference
+ SimpleTensor<T> a{shape_a_collapsed, data_type, 1};
+ SimpleTensor<T> b{shape_b_collapsed, data_type, 1};
+ SimpleTensor<T> c{output_shape_collapsed, data_type, 1};
+
+ // Fill reference
+ fill(a, 0);
+ fill(b, 1);
+
+        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K) and B = (B x K x N): if pretranspose_a
+           is true, A is expected to arrive as (B x K x M), i.e. already pre-transposed by the caller. The fixture
+           then transposes A again to restore (B x M x K), so that the reference implementation, which expects
+           (B x M x K) input, can be called. Similarly, if pretranspose_b is true, B is expected to arrive as
+           (B x N x K) and is transposed back to (B x K x N) here. */
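+
+        /* Worked example (illustrative only): with B = 1, M = 2, K = 3 and pretranspose_a == true, the fixture
+           receives A with shape (1 x 3 x 2); the permute below swaps the two innermost tensor dimensions,
+           restoring A to (1 x 2 x 3) before reference::gemm is invoked. */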
+
+ // Define transposed shapes
+ TensorShape a_transposed_shape(a.shape());
+ a_transposed_shape.set(0, a.shape().y());
+ a_transposed_shape.set(1, a.shape().x());
+
+ TensorShape b_transposed_shape(b.shape());
+ b_transposed_shape.set(0, b.shape().y());
+ b_transposed_shape.set(1, b.shape().x());
+
+ // Define transposed tensors
+ SimpleTensor<T> a_transposed{a_transposed_shape, data_type};
+ SimpleTensor<T> b_transposed{b_transposed_shape, data_type};
+
+        // Pre-transpose A if necessary
+ if (pretranspose_a)
+ {
+ a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
+ }
+
+        // Pre-transpose B if necessary
+ if (pretranspose_b)
+ {
+ b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
+ }
+
+        // Use the transposed tensors when the corresponding pretranspose flag is set, otherwise use the originals
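+        // With alpha = 1.0f and beta = 0.f, the GEMM expression alpha * A * B + beta * C reduces to a plain A * B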
+ SimpleTensor<T> result =
+ reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, 1.0f, 0.f);
+
+        // Reshape the gemm output back to the original shape if the tensor was collapsed
+ if (output_shape_collapsed != output_shape)
+ {
+ result = reference::reshape_layer(result, output_shape);
+ }
+
+ return result;
+ }
+
+ CLTensor _target{};
+ SimpleTensor<T> _reference{};
+ bool _device_supports_export_to_cl_image{false};
+ bool _device_supports_mmul{false};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuMatMulValidationFixture
+ : public DynamicFusionGpuMatMulValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape lhs_shape,
+ TensorShape rhs_shape,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ int M0,
+ int N0,
+ int K0,
+ bool export_rhs_to_cl_image,
+ DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(export_rhs_to_cl_image);
+ DynamicFusionGpuMatMulValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ lhs_shape, rhs_shape, output_shape, transpose_a, transpose_b, M0, N0, K0,
+            false /* export_rhs_to_cl_image */, data_type);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_MATMULKERNELFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h
new file mode 100644
index 0000000000..b0c7143d91
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
+
+#include "src/dynamic_fusion/utils/Utils.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/PoolingLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuPool2dValidationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape, const Pool2dAttributes &pool_attr, DataType data_type)
+ {
+ _target = compute_target(input_shape, pool_attr, data_type);
+ _reference = compute_reference(
+ input_shape, convert_pool_attr_to_pool_info(pool_attr, true /* mixed_precision */), data_type);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+    // The given input shape is in NCHW format
+ TensorType compute_target(TensorShape input_shape, const Pool2dAttributes &pool_attr, const DataType data_type)
+ {
+ CLScheduler::get().default_reinit();
+
+        // Permute the shape to NHWC: the test shapes are given in NCHW
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
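+        // e.g. an NCHW test shape (W, H, C) = (8, 8, 3) becomes (C, W, H) = (3, 8, 8), the dimension
+        // ordering expected for DataLayout::NHWC tensors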
+
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ auto input_info = context.create_tensor_info(TensorInfo(input_shape, 1, data_type, DataLayout::NHWC));
+ auto dst_info = context.create_tensor_info();
+
+ // Create Pool2dSettings
+ GpuPool2dSettings pool_settings = GpuPool2dSettings();
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, pool_attr, pool_settings);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+ // Construct user tensors
+ TensorType t_input{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_input.allocator()->init(*input_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_input.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_input), 0);
+
+ // Run runtime
+ runtime.run({&t_input, &t_dst});
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type)
+ {
+ // Create reference
+ SimpleTensor<T> src(shape, data_type, 1, QuantizationInfo());
+ // Fill reference
+ fill(src, 0);
+ return reference::pooling_layer<T>(src, pool_info, QuantizationInfo(), nullptr, DataLayout::NCHW);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuPool2dValidationFixture
+ : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape,
+ PoolingType pool_type,
+ Size2D pool_size,
+ Padding2D pad,
+ Size2D stride,
+ bool exclude_padding,
+ DataType data_type)
+ {
+ DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ input_shape,
+ Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding(
+ exclude_padding),
+ data_type);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuPool2dSpecialValidationFixture
+ : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape input_shape, Pool2dAttributes pool_attr, DataType data_type)
+ {
+ DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ input_shape, pool_attr, data_type);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h
new file mode 100644
index 0000000000..c9ffbccbc7
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename... TArgs>
+class DynamicFusionActivationValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape, bool fuse, DataType data_type, ActivationLayerInfo act_info, TArgs... args)
+ {
+ _fuse = fuse;
+ _data_type = data_type;
+ _function = act_info.activation();
+ _target = compute_target(shape, args...);
+ _reference = compute_reference(shape, act_info);
+ }
+
+protected:
+ std::vector<T> get_boundary_values(T min, T max)
+ {
+        // This function returns a vector of boundary values representing two partitions
+        // derived from equivalence partitioning:
+        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
+        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
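+        // For example, min = -1 and max = 1 with float data yield
+        // { -1, -0.9, -0.5, -0.1, 0, 0.1, 0.5, 0.9, 1 }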
+ const auto delta = is_data_type_float(_data_type) ? T(0.1f) : T(1);
+ const auto center_value = (min + max) / 2;
+ const auto lower_quarter = (min + center_value) / 2;
+ const auto upper_quarter = (center_value + max) / 2;
+
+ std::vector<T> boundary_values{};
+
+        // Ensure all the inserted values are within the given range after subtracting/adding delta
+ auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
+ {
+ for (auto &v : new_values)
+ {
+ if (v >= min && v <= max)
+ {
+ boundary_values.emplace_back(v);
+ }
+ }
+ };
+
+ insert_values({min, static_cast<T>(min + delta), static_cast<T>(lower_quarter),
+ static_cast<T>(center_value - delta)}); // lower partition
+ insert_values({static_cast<T>(center_value), static_cast<T>(center_value + delta),
+ static_cast<T>(upper_quarter), static_cast<T>(max - delta), max}); // upper partition
+
+ return boundary_values;
+ }
+
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ float min_bound = 0;
+ float max_bound = 0;
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
+ library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
+ }
+
+ TensorType compute_target(const TensorShape &shape, TArgs... args)
+ {
+ // Create a new workload sketch
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext context{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ ITensorInfo *src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type));
+ ITensorInfo *dst_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type));
+
+ ITensorInfo *ans_0_info = FunctionType::create_op(sketch, src_info, args...);
+ if (_fuse)
+ {
+ ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, args...);
+ GpuOutput::create_op(sketch, ans_1_info, dst_info);
+ }
+ else
+ {
+ GpuOutput::create_op(sketch, ans_0_info, dst_info);
+ }
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_src.allocator()->init(*src_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src));
+
+ // Run runtime
+ runtime.run({&t_src, &t_dst});
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info)
+ {
+ // Create reference
+ SimpleTensor<T> src{shape, _data_type, 1};
+
+ // Fill reference
+ fill(src);
+
+ auto tmp = reference::activation_layer<T>(src, act_info);
+
+ if (_fuse)
+ {
+ auto dst = reference::activation_layer<T>(tmp, act_info);
+ return dst;
+ }
+ else
+ {
+ return tmp;
+ }
+ }
+
+protected:
+ ActivationLayerInfo::ActivationFunction _function{};
+ bool _fuse{false};
+ DataType _data_type{};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionSigmoidValidationFixture
+ : public DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, bool fuse, DataType data_type)
+ {
+ ActivationLayerInfo act_info{ActivationLayerInfo::ActivationFunction::LOGISTIC};
+ DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, fuse,
+ data_type, act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionTanhValidationFixture
+ : public DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, bool fuse, DataType data_type)
+ {
+ ActivationLayerInfo act_info{ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
+ DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, fuse,
+ data_type, act_info);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
new file mode 100644
index 0000000000..08fffb305b
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/DepthConvertLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class DynamicFusionCastValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
+ {
+ _target = compute_target(shape, dt_in, dt_out, policy);
+ _reference = compute_reference(shape, dt_in, dt_out, policy);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, DataType dt_in, DataType dt_out)
+ {
+        // Restrict the range to avoid inf values when converting to F16
+ if (dt_out == DataType::F16)
+ {
+ constexpr int signed_min = -32000;
+ constexpr int signed_max = 32000;
+ constexpr int unsigned_min = 0;
+ constexpr int unsigned_max = 65000;
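+            // The largest finite FP16 value is 65504, so the bounds above keep every converted value finite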
+
+ switch (dt_in)
+ {
+ case DataType::U8:
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ case DataType::S8:
+ case DataType::F32:
+ {
+ library->fill_tensor_uniform(tensor, i);
+ break;
+ }
+ case DataType::U16:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min),
+ static_cast<uint16_t>(unsigned_max));
+ break;
+ }
+ case DataType::S16:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min),
+ static_cast<int16_t>(signed_max));
+ break;
+ }
+ case DataType::U32:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min),
+ static_cast<uint32_t>(unsigned_max));
+ break;
+ }
+ case DataType::S32:
+ {
+ library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min),
+ static_cast<int32_t>(signed_max));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+    // The given input shape is in NCHW format (the layout itself is irrelevant for Cast)
+ TensorType
+ compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
+ {
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ // Here, we use DataLayout::NCHW just for the test. However, the optimal data layout to
+ // be used with dynamic fusion is NHWC
+ ITensorInfo *src_info =
+ context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ CastAttributes attributes;
+ attributes.convert_policy(policy).data_type(dt_out);
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, src_info, attributes);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_src.allocator()->init(*src_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src), 0, dt_in, dt_out);
+
+ // Run runtime
+ runtime.run({&t_src, &t_dst});
+ return t_dst;
+ }
+
+ SimpleTensor<T2>
+ compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
+ {
+ // Create reference
+ SimpleTensor<T1> src{shape, dt_in, 1};
+
+ // Fill reference
+ fill(src, 0, dt_in, dt_out);
+
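+        // The final argument of depth_convert is the fixed-point shift; 0 applies no bit-shift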
+ return reference::depth_convert<T1, T2>(src, dt_out, policy, 0);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T2> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h
new file mode 100644
index 0000000000..e8f6f83e42
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionClampValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape, ClampAttributes attributes, bool fuse, DataType data_type)
+ {
+ // CLAMP is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped.
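+        // LU_BOUNDED_RELU computes min(alpha, max(beta, x)), so alpha = max_val and beta = min_val
+        // yield clamp(x, min_val, max_val).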
+ ActivationLayerInfo act_info{ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, attributes.max_val(), attributes.min_val() };
+
+ _fuse = fuse;
+ _attributes = attributes;
+ _data_type = data_type;
+ _target = compute_target(shape, attributes);
+ _reference = compute_reference(shape, act_info);
+ }
+
+protected:
+ std::vector<T> get_boundary_values(T min, T max)
+ {
+        // This function returns a vector of boundary values representing two partitions
+        // derived from equivalence partitioning:
+        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
+        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
+ const auto delta = is_data_type_float(_data_type) ? T(0.1f) : T(1);
+ const auto center_value = (min + max) / 2;
+ const auto lower_quarter = (min + center_value) / 2;
+ const auto upper_quarter = (center_value + max) / 2;
+
+ std::vector<T> boundary_values{};
+
+        // Ensure all the inserted values are within the given range after subtracting/adding delta
+ auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
+ {
+ for(auto &v : new_values)
+ {
+ if(v >= min && v <= max)
+ {
+ boundary_values.emplace_back(v);
+ }
+ }
+ };
+
+ insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) }); // lower partition
+ insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition
+
+ return boundary_values;
+ }
+
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ float min_bound = 0;
+ float max_bound = 0;
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, _data_type);
+ library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
+ }
+
+ TensorType compute_target(const TensorShape &shape, ClampAttributes attributes)
+ {
+ // Create a new workload sketch
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext context{ &cl_compile_ctx };
+ GpuWorkloadSketch sketch{ &context };
+
+ // Create sketch tensors
+ ITensorInfo* src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type));
+ ITensorInfo* dst_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type));
+
+ ITensorInfo *ans_0_info = FunctionType::create_op(sketch, src_info, attributes);
+ if(_fuse)
+ {
+ ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, attributes);
+ GpuOutput::create_op(sketch, ans_1_info, dst_info);
+ }
+ else
+ {
+ GpuOutput::create_op(sketch, ans_0_info, dst_info);
+ }
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_src.allocator()->init(*src_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src));
+
+ // Run runtime
+ runtime.run({ &t_src, &t_dst });
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info)
+ {
+ // Create reference
+ SimpleTensor<T> src{ shape, _data_type, 1, _quantization_info };
+
+ // Fill reference
+ fill(src);
+
+ auto dst = reference::activation_layer<T>(src, act_info, _quantization_info);
+ return dst;
+ }
+
+protected:
+ QuantizationInfo _quantization_info{};
+ ClampAttributes _attributes{};
+ bool _fuse{ false };
+ DataType _data_type{};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h
new file mode 100644
index 0000000000..f02aa5e36a
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_MULFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_MULFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/Globals.h"
+#include "tests/validation/reference/PixelWiseMultiplication.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/* We use a separate test fixture for the Multiplication op instead of reusing ElementwiseBinaryFixture, to avoid
+ * exposing the internal enum ElementwiseOp in the public utils/TypePrinters.h, which the data test case macros
+ * require in order to print the test data.
+ */
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionMulValidationFixture : public framework::Fixture
+{
+public:
+ void setup(const TensorShape &shape0,
+ const TensorShape &shape1,
+ const TensorShape &shape2,
+ DataType data_type,
+ bool is_inplace,
+ bool fuse_two_ops = false)
+ {
+ _data_type = data_type;
+ _is_inplace = is_inplace;
+ _fuse = fuse_two_ops;
+ ARM_COMPUTE_ERROR_ON_MSG(_fuse && shape2.total_size() == 0, "No shape2 provided for fusion of two ops.");
+        ARM_COMPUTE_ERROR_ON_MSG(_fuse && _is_inplace, "In-place computation is not yet supported for the fused case.");
+ _target = compute_target(shape0, shape1, shape2);
+ _reference = compute_reference(shape0, shape1, shape2);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+
+ TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2)
+ {
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Fuse first multiplication op
+ ITensorInfo *lhs_info = context.create_tensor_info(TensorInfo(shape0, 1, _data_type));
+ ITensorInfo *rhs_info = context.create_tensor_info(TensorInfo(shape1, 1, _data_type));
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ ITensorInfo *rhs_info_fuse = nullptr;
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, lhs_info, rhs_info);
+
+ if (_fuse)
+ {
+ rhs_info_fuse = context.create_tensor_info(TensorInfo(shape2, 1, _data_type));
+ ITensorInfo *ans2_info = FunctionType::create_op(sketch, ans_info, rhs_info_fuse);
+ GpuOutput::create_op(sketch, ans2_info, dst_info);
+ }
+ else
+ {
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+ }
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_lhs{};
+ TensorType t_rhs{};
+ TensorType t_rhs_fuse{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_lhs.allocator()->init(*lhs_info);
+ t_rhs.allocator()->init(*rhs_info);
+ t_dst.allocator()->init(*dst_info);
+ if (_fuse)
+ {
+ t_rhs_fuse.allocator()->init(*rhs_info_fuse);
+ }
+
+ // Allocate and fill user tensors
+ // Instead of using ACL allocator, the user can choose to import memory into the tensors
+ t_lhs.allocator()->allocate();
+ t_rhs.allocator()->allocate();
+ t_dst.allocator()->allocate();
+ if (_fuse)
+ {
+ t_rhs_fuse.allocator()->allocate();
+ }
+
+ fill(AccessorType(t_lhs), 0);
+ fill(AccessorType(t_rhs), 1);
+ if (_fuse)
+ {
+ fill(AccessorType(t_rhs_fuse), 2);
+ }
+
+ // Run runtime
+ if (_fuse)
+ {
+ runtime.run({&t_lhs, &t_rhs, &t_rhs_fuse, &t_dst});
+ }
+ else
+ {
+ runtime.run({&t_lhs, &t_rhs, &t_dst});
+ }
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2)
+ {
+ // Create reference
+ SimpleTensor<T> ref_lhs{shape0, _data_type, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_rhs{shape1, _data_type, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_rhs_fuse{shape2, _data_type, 1, QuantizationInfo()};
+
+ // Fill reference
+ fill(ref_lhs, 0);
+ fill(ref_rhs, 1);
+ SimpleTensor<T> ref_dst = reference::pixel_wise_multiplication<T, T, T>(
+ ref_lhs, ref_rhs, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, _data_type,
+ QuantizationInfo());
+ if (_fuse)
+ {
+ fill(ref_rhs_fuse, 2);
+ SimpleTensor<T> ref_dst_fuse = reference::pixel_wise_multiplication<T, T, T>(
+ ref_dst, ref_rhs_fuse, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, _data_type,
+ QuantizationInfo());
+ return ref_dst_fuse;
+ }
+ return ref_dst;
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ bool _is_inplace{false};
+ bool _fuse{false};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionMulOneOpValidationFixture
+ : public DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape0, DataType data_type, bool is_inplace)
+ {
+ DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape0, shape0, TensorShape(), data_type, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionMulBroadcastValidationFixture
+ : public DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, bool is_inplace)
+ {
+ DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape0, shape1, TensorShape(), data_type, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionMulTwoOpsValidationFixture
+ : public DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(const TensorShape &shape0,
+ const TensorShape &shape1,
+ const TensorShape &shape2,
+ DataType data_type,
+ bool is_inplace,
+ bool fuse_two_ops)
+ {
+ DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape0, shape1, shape2, data_type, is_inplace, fuse_two_ops);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_MULFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h
new file mode 100644
index 0000000000..bde3360940
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESHAPEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESHAPEFIXTURE_H
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/ReshapeAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h"
+
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/validation/reference/ReshapeLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuReshapeLayerValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type)
+ {
+ _target = compute_target(input_shape, output_shape, data_type);
+ _reference = compute_reference(input_shape, output_shape, data_type);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+
+ TensorType compute_target(TensorShape &input_shape, TensorShape &output_shape, DataType data_type)
+ {
+        // Check that the input shape can indeed be reshaped to the output one
+ ARM_COMPUTE_ASSERT(input_shape.total_size() == output_shape.total_size());
+
+ // Create a new workload sketch
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ ITensorInfo *src_info = context.create_tensor_info(TensorInfo(input_shape, 1, data_type));
+ ITensorInfo *dst_info = context.create_tensor_info(TensorInfo(output_shape, 1, data_type));
+ ReshapeAttributes attributes;
+ attributes.shape(output_shape);
+
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, src_info, attributes);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+ // Initialize user tensors
+ t_src.allocator()->init(*src_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src), 0);
+
+ // Run runtime
+ runtime.run({&t_src, &t_dst});
+
+ return t_dst;
+ }
+
+ SimpleTensor<T>
+ compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
+ {
+ // Create reference
+ SimpleTensor<T> src{input_shape, data_type};
+
+ // Fill reference
+ fill(src, 0);
+
+ return reference::reshape_layer<T>(src, output_shape);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESHAPEFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h
new file mode 100644
index 0000000000..711767b66f
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/SimpleTensor.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/reference/Scale.h"
+#include "tests/validation/Validation.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionResizeGenericValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape,
+ DataType data_type,
+ QuantizationInfo quantization_info,
+ DataLayout data_layout,
+ InterpolationPolicy interpolation_policy,
+ SamplingPolicy sampling_policy,
+ bool align_corners,
+ QuantizationInfo output_quantization_info)
+ {
+ _shape = shape;
+ _interpolation_policy = interpolation_policy;
+ _sampling_policy = sampling_policy;
+ _data_type = data_type;
+ _input_quantization_info = quantization_info;
+ _output_quantization_info = output_quantization_info;
+ _align_corners = align_corners;
+ _data_layout = data_layout;
+
+ ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion resize supports only NHWC layout
+
+ generate_scale(shape);
+
+ _target = compute_target(shape);
+ _reference = compute_reference(shape);
+ }
+
+protected:
+ void generate_scale(const TensorShape &shape)
+ {
+ static constexpr float _min_scale{0.25f};
+ static constexpr float _max_scale{3.f};
+
+ constexpr float max_width{8192.0f};
+ constexpr float max_height{6384.0f};
+ constexpr float min_width{1.f};
+ constexpr float min_height{1.f};
+
+ std::mt19937 generator(library->seed());
+ std::uniform_real_distribution<float> distribution_float(_min_scale, _max_scale);
+
+ auto generate = [&](size_t input_size, float min_output, float max_output) -> int
+ {
+ const float generated_scale = distribution_float(generator);
+ const int output_size = static_cast<int>(
+ utility::clamp(static_cast<float>(input_size) * generated_scale, min_output, max_output));
+ return output_size;
+ };
+
+        // The input shape is always given in NCHW layout; NHWC is handled by the permute in compute_target()
+ const int idx_width = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::HEIGHT);
+
+ _output_width = generate(shape[idx_width], min_width, max_width);
+ _output_height = generate(shape[idx_height], min_height, max_height);
+ }
+
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if (tensor.data_type() == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-5.0f, 5.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if (tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-5.0f, 5.0f};
+ library->fill(tensor, distribution, 0);
+ }
+ else if (is_data_type_quantized(tensor.data_type()))
+ {
+ std::uniform_int_distribution<> distribution(0, 100);
+ library->fill(tensor, distribution, 0);
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+ }
+
+ TensorType compute_target(TensorShape shape)
+ {
+        // Our test shapes are assumed to be in NCHW data layout, hence the permutation
+ permute(shape, PermutationVector(2U, 0U, 1U));
+
+ // Create a new workload sketch
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ // Create sketch tensors
+ ITensorInfo *src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type, _data_layout));
+ src_info->set_quantization_info(_input_quantization_info);
+ ITensorInfo *dst_info = context.create_tensor_info();
+
+ ResizeAttributes attributes;
+ attributes.align_corners(_align_corners)
+ .sampling_policy(_sampling_policy)
+ .interpolation_policy(_interpolation_policy)
+ .output_width(_output_width)
+ .output_height(_output_height);
+
+ ITensorInfo *scale_result_info = FunctionType::create_op(sketch, src_info, attributes);
+ GpuOutput::create_op(sketch, scale_result_info, dst_info);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+
+ // Construct user tensors
+ TensorType t_src{};
+ TensorType t_dst{};
+
+ // Initialize user tensors
+ t_src.allocator()->init(*src_info);
+ t_dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ t_src.allocator()->allocate();
+ t_dst.allocator()->allocate();
+
+ fill(AccessorType(t_src));
+
+ // Run runtime
+ runtime.run({&t_src, &t_dst});
+
+ return t_dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape)
+ {
+ // Create reference
+ SimpleTensor<T> src{shape, _data_type, 1, _input_quantization_info};
+
+ // Reference code is NCHW, so the input shapes are NCHW
+ const int idx_width = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::HEIGHT);
+
+ const float scale_x = static_cast<float>(_output_width) / shape[idx_width];
+ const float scale_y = static_cast<float>(_output_height) / shape[idx_height];
+
+ // Fill reference
+ fill(src);
+
+ return reference::scale<T>(src, scale_x, scale_y, _interpolation_policy, BorderMode::REPLICATE,
+ static_cast<T>(0), _sampling_policy, /* ceil_policy_scale */ false, _align_corners,
+ _output_quantization_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ TensorShape _shape{};
+ InterpolationPolicy _interpolation_policy{};
+ SamplingPolicy _sampling_policy{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
+ bool _align_corners{false};
+ int _output_width{0};
+ int _output_height{0};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionResizeValidationFixture
+ : public DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape,
+ DataType data_type,
+ DataLayout data_layout,
+ InterpolationPolicy policy,
+ SamplingPolicy sampling_policy,
+ bool align_corners)
+ {
+ DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape, data_type, QuantizationInfo(), data_layout, policy, sampling_policy, align_corners,
+ QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class DynamicFusionResizeQuantizedValidationFixture
+ : public DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape,
+ DataType data_type,
+ QuantizationInfo quantization_info,
+ DataLayout data_layout,
+ InterpolationPolicy policy,
+ SamplingPolicy sampling_policy,
+ bool align_corners)
+ {
+ DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape, data_type, quantization_info, data_layout, policy, sampling_policy, align_corners,
+ quantization_info);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE_H
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h
new file mode 100644
index 0000000000..175d4ff889
--- /dev/null
+++ b/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE_H
+
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/SimpleTensor.h"
+#include "tests/validation/reference/SoftmaxLayer.h"
+#include "tests/validation/Validation.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionSoftmaxValidationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape, DataType data_type, float beta, size_t axis, bool is_log)
+ {
+ _reference = compute_reference(shape, data_type, beta, axis, is_log);
+ _target = compute_target(shape, data_type, beta, axis, is_log);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if (tensor.data_type() == DataType::F32)
+ {
+ std::uniform_real_distribution<float> distribution(-10.0f, 10.0f);
+ library->fill(tensor, distribution, 0);
+ }
+ else if (tensor.data_type() == DataType::F16)
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-10.0f, 10.0f};
+ library->fill(tensor, distribution, 0);
+ }
+ else if (!is_data_type_quantized(tensor.data_type()))
+ {
+ std::uniform_int_distribution<> distribution(0, 100);
+ library->fill(tensor, distribution, 0);
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+ }
+
+ TensorType compute_target(const TensorShape &shape, DataType data_type, float beta, int32_t axis, bool is_log)
+ {
+ // Create a new workload sketch
+ CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ GpuWorkloadContext context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
+
+ SoftmaxAttributes softmax_attr{};
+ softmax_attr.axis(axis).beta(beta).is_log_softmax(is_log);
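+        // beta scales the input before exponentiation: softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j)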
+ ITensorInfo *src_info = context.create_tensor_info(shape, 1, data_type);
+ ITensorInfo *dst_info = context.create_tensor_info(shape, 1, data_type);
+ FunctionType::create_op(sketch, src_info, dst_info, softmax_attr);
+
+ // Configure runtime
+ ClWorkloadRuntime runtime;
+ runtime.configure(sketch);
+
+ // (Important) Allocate auxiliary tensor memory if there are any
+ // Instead of using ACL allocated memory, the user can choose to import memory into the tensors
+ for (auto &data : runtime.get_auxiliary_tensors())
+ {
+ CLTensor *tensor = std::get<0>(data);
+ TensorInfo info = std::get<1>(data);
+ AuxMemoryInfo aux_mem_req = std::get<2>(data);
+ tensor->allocator()->init(info, aux_mem_req.alignment);
+ tensor->allocator()->allocate(); // Use ACL allocated memory
+ }
+ // Construct user tensors
+ TensorType src{};
+ TensorType dst{};
+
+ // Initialize user tensors
+ src.allocator()->init(*src_info);
+ dst.allocator()->init(*dst_info);
+
+ // Allocate and fill user tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+ fill(AccessorType(src));
+
+ // Run runtime
+ runtime.run({&src, &dst});
+
+ return dst;
+ }
+
+ SimpleTensor<T>
+ compute_reference(const TensorShape &shape, DataType data_type, float beta, int32_t axis, bool is_log)
+ {
+ // Create reference
+ SimpleTensor<T> src{shape, data_type, 1};
+
+ // Fill reference
+ fill(src);
+
+ return reference::softmax_layer<T>(src, beta, axis, is_log);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionSoftmaxValidationFixture
+ : public DynamicFusionSoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, DataType data_type, float beta, size_t axis, bool is_log)
+ {
+ DynamicFusionSoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape, data_type, beta, axis, is_log);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE_H