author    Sheri Zhang <sheri.zhang@arm.com>    2021-06-29 17:34:06 +0100
committer Sheri Zhang <sheri.zhang@arm.com>    2021-07-13 15:36:03 +0000
commit    a387e271b1e02ffd5c2993702b9a21c1ed5c95fa (patch)
tree      f53416756c70c85d962218168ad3cd3359d9f5c8 /tests/validation/fixtures/ArithmeticOperationsFixture.h
parent    6fc7d528382716de9e417c9dcf0fddf109446e9f (diff)
Add in-place calculation support for CL elementwise arithmetic kernels
- Add in-place calculation support in ClArithmeticKernel, ClSaturatedArithmeticKernel and ClMulKernel
- Add in-place test cases

Resolves: COMPMID-4431
Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: Id484bdb76b74478a33fedb471ae0c7f799c599f6
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5885
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
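
For context, the sketch below (illustrative only, not part of this patch) shows how the newly supported in-place path can be exercised through the public CL runtime API. It assumes CLArithmeticAddition as declared in arm_compute/runtime/CL/functions/CLElementwiseOperations.h and a library build that includes this change:

    // Minimal sketch: in-place elementwise addition, src1 <- src1 + src2.
    // Error handling and data filling are omitted for brevity.
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        CLTensor src1, src2;
        src1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        src2.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        // Passing src1 as the output requests the in-place computation enabled by this patch.
        CLArithmeticAddition add;
        add.configure(&src1, &src2, &src1, ConvertPolicy::SATURATE);

        src1.allocator()->allocate();
        src2.allocator()->allocate();
        // ... fill src1 and src2 ...

        add.run();
        CLScheduler::get().sync();
        return 0;
    }
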
Diffstat (limited to 'tests/validation/fixtures/ArithmeticOperationsFixture.h')
-rw-r--r--  tests/validation/fixtures/ArithmeticOperationsFixture.h  117
1 file changed, 67 insertions(+), 50 deletions(-)
diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h
index 7aa716d676..0f7e44e588 100644
--- a/tests/validation/fixtures/ArithmeticOperationsFixture.h
+++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h
@@ -47,13 +47,13 @@ class ArithmeticOperationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool in_place)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool is_inplace)
{
- _op = op;
- _act_info = act_info;
- _in_place = in_place;
- _target = compute_target(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
- _reference = compute_reference(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ _op = op;
+ _act_info = act_info;
+ _is_inplace = is_inplace;
+ _target = compute_target(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ _reference = compute_reference(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
}
protected:
@@ -67,27 +67,51 @@ protected:
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type, 1, qinfo0);
- TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type, 1, qinfo1);
- TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), data_type, 1, qinfo_out);
- TensorType *dst_to_use = _in_place ? &ref_src1 : &dst;
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type, 1, qinfo0);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, data_type, 1, qinfo_out);
+
+    // Check whether to do in-place computation and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if(_is_inplace)
+ {
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if(src1_is_inplace)
+ {
+ actual_dst = &ref_src1;
+ }
+ else
+ {
+ actual_dst = &ref_src2;
+ }
+ }
// Create and configure function
FunctionType arith_op;
- arith_op.configure(&ref_src1, &ref_src2, dst_to_use, convert_policy, _act_info);
+ arith_op.configure(&ref_src1, &ref_src2, actual_dst, convert_policy, _act_info);
ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
- ARM_COMPUTE_ASSERT(dst_to_use->info()->is_resizable());
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
- dst_to_use->allocator()->allocate();
ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
- ARM_COMPUTE_ASSERT(!dst_to_use->info()->is_resizable());
+
+    // If not doing in-place computation, the original dst still needs to be allocated
+ if(!_is_inplace)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -96,38 +120,30 @@ protected:
// Compute function
arith_op.run();
- if(_in_place)
- {
- return ref_src1;
- }
- return dst;
+ return std::move(*actual_dst);
}
SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
- // current in-place implementation only supports same metadata of input and output tensors.
- // By ignoring output quantization information here, we can make test cases implementation much simpler.
- QuantizationInfo output_qinfo = _in_place ? qinfo0 : qinfo_out;
-
// Create reference
SimpleTensor<T> ref_src1{ shape0, data_type, 1, qinfo0 };
SimpleTensor<T> ref_src2{ shape1, data_type, 1, qinfo1 };
- SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), data_type, 1, output_qinfo };
+ SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), data_type, 1, qinfo_out };
// Fill reference
fill(ref_src1, 0);
fill(ref_src2, 1);
auto result = reference::arithmetic_operation<T>(_op, ref_src1, ref_src2, ref_dst, convert_policy);
- return _act_info.enabled() ? reference::activation_layer(result, _act_info, output_qinfo) : result;
+ return _act_info.enabled() ? reference::activation_layer(result, _act_info, qinfo_out) : result;
}
TensorType _target{};
SimpleTensor<T> _reference{};
reference::ArithmeticOperation _op{ reference::ArithmeticOperation::ADD };
ActivationLayerInfo _act_info{};
- bool _in_place{};
+ bool _is_inplace{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -135,10 +151,10 @@ class ArithmeticAdditionBroadcastValidationFixture : public ArithmeticOperationG
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -147,10 +163,10 @@ class ArithmeticAdditionValidationFixture : public ArithmeticOperationGenericFix
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -159,10 +175,10 @@ class ArithmeticAdditionBroadcastValidationFloatFixture : public ArithmeticOpera
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -171,10 +187,10 @@ class ArithmeticAdditionValidationFloatFixture : public ArithmeticOperationGener
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -183,11 +199,11 @@ class ArithmeticAdditionValidationQuantizedFixture : public ArithmeticOperationG
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy,
- qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false);
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
@@ -196,10 +212,11 @@ class ArithmeticAdditionValidationQuantizedBroadcastFixture : public ArithmeticO
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out,
+ bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy,
- qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false);
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
@@ -208,10 +225,10 @@ class ArithmeticSubtractionBroadcastValidationFixture : public ArithmeticOperati
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool in_place)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -221,10 +238,10 @@ class ArithmeticSubtractionBroadcastValidationFloatFixture : public ArithmeticOp
public:
template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info,
- bool in_place)
+ bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, in_place);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -233,10 +250,10 @@ class ArithmeticSubtractionValidationFixture : public ArithmeticOperationGeneric
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool in_place)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
}
};
@@ -245,10 +262,10 @@ class ArithmeticSubtractionValidationFloatFixture : public ArithmeticOperationGe
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool in_place)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, in_place);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace);
}
};
@@ -257,11 +274,11 @@ class ArithmeticSubtractionValidationQuantizedFixture : public ArithmeticOperati
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool in_place)
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy,
- qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), in_place);
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
@@ -271,10 +288,10 @@ class ArithmeticSubtractionValidationQuantizedBroadcastFixture : public Arithmet
public:
template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out,
- bool in_place)
+ bool is_inplace)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy,
- qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), in_place);
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace);
}
};
} // namespace validation
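
For reference, the in-place eligibility rule introduced in compute_target() above can be summarised as the stand-alone helper below. This is a sketch only: the helper name is hypothetical, while the calls mirror the fixture code (arm_compute::detail::have_different_dimensions and QuantizationInfo equality):

    // Sketch (hypothetical helper): a source tensor may be reused as the in-place
    // destination only if the output is non-empty, the source already has the full
    // broadcast output shape, and the source/output quantization info match.
    #include "arm_compute/core/QuantizationInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Validate.h"

    bool src_can_be_inplace_dst(const arm_compute::TensorShape      &src_shape,
                                const arm_compute::TensorShape      &out_shape,
                                const arm_compute::QuantizationInfo &src_qinfo,
                                const arm_compute::QuantizationInfo &out_qinfo)
    {
        const bool same_shape = !arm_compute::detail::have_different_dimensions(out_shape, src_shape, 0);
        return out_shape.total_size() != 0 && same_shape && (src_qinfo == out_qinfo);
    }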