aboutsummaryrefslogtreecommitdiff
path: root/tests/validation/fixtures/FullyConnectedLayerFixture.h
diff options
context:
space:
mode:
authorGiorgio Arena <giorgio.arena@arm.com>2021-09-24 14:04:27 +0100
committerGiorgio Arena <giorgio.arena@arm.com>2021-09-29 10:31:08 +0000
commit63e0beb9fb9646407d123e830165546e9129e95d (patch)
tree9bfe80e8d853327a82f9f622d89c3b43df0400f4 /tests/validation/fixtures/FullyConnectedLayerFixture.h
parentb1ba1e33f2b03b211f561123559c24517c0e5865 (diff)
downloadComputeLibrary-63e0beb9fb9646407d123e830165546e9129e95d.tar.gz
Add support for non-constant weights and biases in CpuFullyConnected
Changing the approach for specifying that weights and biases tensors are non-constant by making it a member of TensorInfo rather than an option of the functions. Resolves: COMPMID-4222, COMPMID-4811 Signed-off-by: Giorgio Arena <giorgio.arena@arm.com> Change-Id: I9b0081ccbcf8271ce029ba6755563d64c59e1d32 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6313 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/fixtures/FullyConnectedLayerFixture.h')
-rw-r--r--tests/validation/fixtures/FullyConnectedLayerFixture.h128
1 file changed, 107 insertions, 21 deletions
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 7d767642f3..3048c56f6b 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -232,7 +232,7 @@ protected:
fill(weights, 1);
fill(bias, 2);
- return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape), _activation_info, _quantization_info);
+ return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape, _quantization_info), _activation_info, _quantization_info);
}
TensorType _target{};
@@ -273,7 +273,7 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class FullyConnectedWithDynamicWeightsFixture : public framework::Fixture
+class FullyConnectedWithDynamicTensorsFixture : public framework::Fixture
{
private:
template <typename U>
@@ -289,6 +289,16 @@ private:
std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
}
+ else if(_data_type == DataType::QASYMM8)
+ {
+ std::uniform_int_distribution<uint8_t> distribution(0, 30);
+ library->fill(tensor, distribution, i);
+ }
+ else if(_data_type == DataType::S32)
+ {
+ std::uniform_int_distribution<int32_t> distribution(-50, 50);
+ library->fill(tensor, distribution, i);
+ }
else
{
library->fill_tensor_uniform(tensor, i);
@@ -324,6 +334,11 @@ private:
constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f);
validate(AccessorType(target), ref, rel_tolerance_f32, 0, abs_tolerance_f32);
}
+ else if(_data_type == DataType::QASYMM8)
+ {
+ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
+ validate(AccessorType(target), ref, tolerance_qasymm8);
+ }
else
{
validate(AccessorType(target), ref);
@@ -331,32 +346,51 @@ private:
}
public:
+ using TDecay = typename std::decay<T>::type;
+ using TBias = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type;
+
template <typename...>
void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
- DataType data_type, ActivationLayerInfo activation_info)
+ DataType data_type, ActivationLayerInfo activation_info, bool constant_weights, bool constant_bias)
{
_data_type = data_type;
+ const bool is_quantized = is_data_type_quantized(data_type);
+
+ const DataType bias_data_type = (is_quantized) ? DataType::S32 : data_type;
+
+ const QuantizationInfo src_qinfo = is_quantized ? QuantizationInfo(0.1f, 10) : QuantizationInfo();
+ const QuantizationInfo weights_qinfo = is_quantized ? QuantizationInfo(0.3f, 20) : QuantizationInfo();
+ const QuantizationInfo dst_qinfo = is_quantized ? QuantizationInfo(0.2f, 5) : QuantizationInfo();
+
// Setup tensor meta-data
- TensorInfo src_info(src_shape, 1, data_type);
+ const TensorInfo src_info(src_shape, 1, data_type, src_qinfo);
_src.allocator()->init(src_info);
- TensorShape tr_weights_shape{ weights_shape[1], weights_shape[0] };
- TensorInfo wei_info(tr_weights_shape, 1, data_type);
+ TensorInfo wei_info(weights_shape, 1, data_type, weights_qinfo);
+ if(!constant_weights)
+ {
+ const TensorShape tr_weights_shape{ weights_shape[1], weights_shape[0] };
+ wei_info.set_tensor_shape(tr_weights_shape);
+ }
+ wei_info.set_are_values_constant(constant_weights);
_weights.allocator()->init(wei_info);
- TensorInfo bias_info(bias_shape, 1, data_type);
+ TensorInfo bias_info(bias_shape, 1, bias_data_type);
+ bias_info.set_are_values_constant(constant_bias);
_bias.allocator()->init(bias_info);
- TensorInfo dst_info(dst_shape, 1, data_type);
+ const TensorInfo dst_info(dst_shape, 1, data_type, dst_qinfo);
_dst.allocator()->init(dst_info);
// Configure FC layer and mark the weights as non constant
FullyConnectedLayerInfo fc_info;
- fc_info.activation_info = activation_info;
- fc_info.are_weights_reshaped = true;
- fc_info.transpose_weights = false;
- fc_info.constant_weights = false;
+ fc_info.activation_info = activation_info;
+ if(!constant_weights)
+ {
+ fc_info.are_weights_reshaped = true;
+ fc_info.transpose_weights = false;
+ }
FunctionType fc;
fc.configure(&_src, &_weights, &_bias, &_dst, fc_info);
@@ -369,29 +403,55 @@ public:
// Run multiple iterations with different inputs
constexpr int num_iterations = 5;
int randomizer_offset = 0;
+
+ // Create reference tensors
+ SimpleTensor<T> src{ src_shape, data_type, 1, src_qinfo };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, weights_qinfo };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type };
+
+ // Fill weights and/or bias if they remain constant
+ if(constant_weights)
+ {
+ fill(AccessorType(_weights), 1);
+ fill(weights, 1);
+ }
+ if(constant_bias)
+ {
+ fill(AccessorType(_bias), 2);
+ fill(bias, 2);
+ }
+
for(int i = 0; i < num_iterations; ++i)
{
// Run target
{
fill(AccessorType(_src), randomizer_offset);
- fill_transposed_weights(_weights, weights_shape, randomizer_offset + 1);
- fill(AccessorType(_bias), randomizer_offset + 2);
+ if(!constant_weights)
+ {
+ fill_transposed_weights(_weights, weights_shape, randomizer_offset + 1);
+ }
+ if(!constant_bias)
+ {
+ fill(AccessorType(_bias), randomizer_offset + 2);
+ }
fc.run();
}
// Run reference and compare
{
- SimpleTensor<T> src{ src_shape, data_type };
- SimpleTensor<T> weights{ weights_shape, data_type };
- SimpleTensor<T> bias{ bias_shape, data_type };
-
// Fill reference
fill(src, randomizer_offset);
- fill(weights, randomizer_offset + 1);
- fill(bias, randomizer_offset + 2);
+ if(!constant_weights)
+ {
+ fill(weights, randomizer_offset + 1);
+ }
+ if(!constant_bias)
+ {
+ fill(bias, randomizer_offset + 2);
+ }
- auto dst = reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, dst_shape), activation_info);
+ auto dst = reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, dst_shape, dst_qinfo), activation_info, dst_qinfo);
// Validate
validate_with_tolerance(_dst, dst);
@@ -405,6 +465,32 @@ private:
TensorType _src{}, _weights{}, _bias{}, _dst{};
DataType _data_type{ DataType::UNKNOWN };
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FullyConnectedWithDynamicWeightsFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
+ DataType data_type, ActivationLayerInfo activation_info)
+ {
+ FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
+ dst_shape, data_type, activation_info, false, true);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FullyConnectedWithDynamicBiasFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
+ DataType data_type, ActivationLayerInfo activation_info)
+ {
+ FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape,
+ dst_shape, data_type, activation_info, true, false);
+ }
+};
} // namespace validation
} // namespace test
} // namespace arm_compute