author     Giorgio Arena <giorgio.arena@arm.com>   2020-02-11 17:21:31 +0000
committer  SiCong Li <sicong.li@arm.com>           2020-04-03 08:51:12 +0000
commit     8b2a7d3aa119e7f1d6a03690d05eb27c5d178b9f (patch)
tree       9fb4f4f328f7a17de13bef109834e8ad8a21d2ee /tests/validation/fixtures/PixelWiseMultiplicationFixture.h
parent     15e4d876643c37e1db36ee1190ec52319479ffaf (diff)
download   ComputeLibrary-8b2a7d3aa119e7f1d6a03690d05eb27c5d178b9f.tar.gz
COMPMID-3101 Fuse activation with floating point elementwise operation layers in CL
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: I1693f8664ba7c0dc8c076bbe7365cef1e667bd25
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2718
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/fixtures/PixelWiseMultiplicationFixture.h')
-rw-r--r--  tests/validation/fixtures/PixelWiseMultiplicationFixture.h | 66
1 file changed, 47 insertions(+), 19 deletions(-)
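
To illustrate the interface this patch exercises, here is a minimal, hypothetical usage sketch of the fused call on the CL backend: an elementwise multiplication configured with an ActivationLayerInfo so the activation runs inside the same kernel rather than as a separate layer. The tensor shapes, F32 data type and bounded-ReLU choice are illustrative assumptions; only the configure(..., act_info) overload mirrors what the fixture change below exercises.

// Minimal sketch, not part of this patch. Assumptions: CL backend, F32 tensors,
// bounded ReLU as the fused activation. Mirrors the fixture's call:
//   multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy, act_info);
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    CLTensor src1, src2, dst;
    src1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    src2.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // The activation is fused into the multiplication kernel instead of running as a separate layer.
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);

    CLPixelWiseMultiplication multiply;
    multiply.configure(&src1, &src2, &dst, 1.f /* scale */,
                       ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, act_info);

    src1.allocator()->allocate();
    src2.allocator()->allocate();
    dst.allocator()->allocate();

    multiply.run();
    CLScheduler::get().sync();
    return 0;
}
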
diff --git a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
index 37359f421b..f561a37a71 100644
--- a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
+++ b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
@@ -31,6 +31,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/PixelWiseMultiplication.h"
namespace arm_compute
@@ -46,18 +47,19 @@ public:
template <typename...>
void setup(const TensorShape &shape0,
const TensorShape &shape1,
- DataType dt_in1,
- DataType dt_in2,
- DataType dt_out,
- float scale,
- ConvertPolicy convert_policy,
- RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0,
- QuantizationInfo qinfo1,
- QuantizationInfo qinfo_out)
+ DataType dt_in1,
+ DataType dt_in2,
+ DataType dt_out,
+ float scale,
+ ConvertPolicy convert_policy,
+ RoundingPolicy rounding_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out,
+ ActivationLayerInfo act_info)
{
- _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out);
- _reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out);
+ _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
+ _reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
}
protected:
@@ -69,7 +71,7 @@ protected:
TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
{
// Create tensors
TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0);
@@ -78,7 +80,7 @@ protected:
// Create and configure function
FunctionType multiply;
- multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy);
+ multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy, act_info);
ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -105,7 +107,7 @@ protected:
SimpleTensor<T3> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
{
// Create reference
SimpleTensor<T1> src1{ shape0, dt_in1, 1, qinfo0 };
@@ -115,7 +117,8 @@ protected:
fill(src1, 0);
fill(src2, 1);
- return reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, qinfo_out);
+ auto result = reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, qinfo_out);
+ return act_info.enabled() ? reference::activation_layer(result, act_info, qinfo_out) : result;
}
TensorType _target{};
@@ -130,7 +133,7 @@ public:
void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
}
};
@@ -142,7 +145,32 @@ public:
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class PixelWiseMultiplicationValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info)
+ {
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class PixelWiseMultiplicationBroadcastValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
+ ActivationLayerInfo act_info)
+ {
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
@@ -154,8 +182,8 @@ public:
void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
- PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy,
- rounding_policy, qinfo0, qinfo1, qinfo_out);
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
}
};
} // namespace validation
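
As a rough idea of how the new float fixtures above are meant to be consumed, the sketch below instantiates PixelWiseMultiplicationValidationFloatFixture for the CL backend. Only the fixture's template parameters and the setup() argument order (shape, input types, scale, policies, activation) come from this patch; the test name, dataset names and chosen values are illustrative assumptions, and such a test would live inside the usual arm_compute::test::validation namespaces and TEST_SUITE blocks.

// Hypothetical test-suite usage; dataset choices are illustrative, not from this patch.
template <typename T>
using CLPixelWiseMultiplicationFloatFixture =
    PixelWiseMultiplicationValidationFloatFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, T>;

// The combined dataset feeds setup(shape, dt_in1, dt_in2, scale, convert_policy, rounding_policy, act_info).
FIXTURE_DATA_TEST_CASE(RunWithActivation, CLPixelWiseMultiplicationFloatFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                           datasets::SmallShapes(),
                           framework::dataset::make("DataTypeIn1", DataType::F32)),
                           framework::dataset::make("DataTypeIn2", DataType::F32)),
                           framework::dataset::make("Scale", 1.f)),
                           framework::dataset::make("ConvertPolicy", ConvertPolicy::SATURATE)),
                           framework::dataset::make("RoundingPolicy", RoundingPolicy::TO_NEAREST_UP)),
                           framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
{
    // Compare the fused CL result against the fixture's (multiply + activation) reference.
    validate(CLAccessor(_target), _reference);
}
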