author    Jakub Sujak <jakub.sujak@arm.com>  2023-03-29 11:16:18 +0100
committer Jakub Sujak <jakub.sujak@arm.com>  2023-04-04 14:09:21 +0000
commit    617ed50000532877296fff93973590a8ab67f96d (patch)
tree      d571654a2c31dfb6d5ec1529160b9f3d3348938c /tests/validation/fixtures/FullyConnectedLayerFixture.h
parent    f26ea2f8cc957a1e6faf0361dea805fb2e236061 (diff)
download  ComputeLibrary-617ed50000532877296fff93973590a8ab67f96d.tar.gz
Support dynamic weights for Fully Connected layers on GPU
The fully connected function and operator running on GPU have been adapted to
support dynamic weights. Dynamic weights require the reshape and data layout
conversion of weight tensors at runtime, in the prepare stage of the operator.
The implementation for GPU is identical to the CPU implementation.

This patch also deprecates the `are_weights_reshaped` option in Fully Connected.

Resolves: COMPMID-5870

Change-Id: I28f967695879d82cc91a928d95308a4e0e52a597
Signed-off-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9403
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
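For context, the following is a minimal sketch (not part of this patch) of how a caller
might exercise the dynamic-weight path on GPU. The tensor shapes are chosen purely for
illustration, and the use of set_are_values_constant(false) on the weight tensor's info
to mark the weights as non-constant is an assumption about how the runtime detects
dynamic weights:

// Hedged sketch: CLFullyConnectedLayer with weights that may change between runs.
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 128 inputs -> 64 outputs, batch size 1 (shapes chosen for illustration).
    CLTensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));

    // Assumption: flagging the weight values as non-constant is what requests the
    // dynamic-weight path, so reshape/layout conversion is redone in prepare().
    weights.info()->set_are_values_constant(false);

    CLFullyConnectedLayer fc;
    fc.configure(&src, &weights, &bias, &dst);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // In a real loop the caller would map `weights` and write new values here
    // before each run(); with dynamic weights the function re-prepares them.
    fc.run();
    CLScheduler::get().sync();
    return 0;
}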
Diffstat (limited to 'tests/validation/fixtures/FullyConnectedLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/FullyConnectedLayerFixture.h  2
1 file changed, 0 insertions, 2 deletions
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 7d1aa494ba..75bef144ad 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -333,7 +333,6 @@ private:
validate(AccessorType(target), ref, rel_tolerance_f32, 0, abs_tolerance_f32);
}
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
void validate_with_tolerance(TensorType &target, SimpleTensor<half_float::half> &ref)
{
constexpr AbsoluteTolerance<float> abs_tolerance_f16(0.3f);
@@ -342,7 +341,6 @@ private:
validate(AccessorType(target), ref, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
void validate_with_tolerance(TensorType &target, SimpleTensor<uint8_t> &ref)
{