diff options
Diffstat (limited to 'tests/validation/fixtures')
3 files changed, 11 insertions, 34 deletions
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h index 65a3363e24..4c1cc94d3d 100644 --- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h @@ -203,7 +203,7 @@ protected: bool pretranspose_b, DataType data_type) { - // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 4D + // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 3D // This is necessary unless we choose to extend gemm reference for 5D+ tensors TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ); TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ); diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h index dd3519b549..b0c7143d91 100644 --- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h @@ -51,11 +51,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DynamicFusionGpuPool2dValidationGenericFixture : public framework::Fixture { public: - void setup(TensorShape input_shape, const Pool2dAttributes &pool_attr, DataType data_type, bool mixed_precision) + void setup(TensorShape input_shape, const Pool2dAttributes &pool_attr, DataType data_type) { - _target = compute_target(input_shape, pool_attr, data_type, mixed_precision); - _reference = - compute_reference(input_shape, convert_pool_attr_to_pool_info(pool_attr, mixed_precision), data_type); + _target = compute_target(input_shape, pool_attr, data_type); + _reference = compute_reference( + input_shape, convert_pool_attr_to_pool_info(pool_attr, true /* mixed_precision */), data_type); } protected: @@ -82,10 +82,7 @@ protected: } // Given input is in 
nchw format - TensorType compute_target(TensorShape input_shape, - const Pool2dAttributes &pool_attr, - const DataType data_type, - bool mixed_precision) + TensorType compute_target(TensorShape input_shape, const Pool2dAttributes &pool_attr, const DataType data_type) { CLScheduler::get().default_reinit(); @@ -102,7 +99,7 @@ protected: auto dst_info = context.create_tensor_info(); // Create Pool2dSettings - GpuPool2dSettings pool_settings = GpuPool2dSettings().mixed_precision(mixed_precision); + GpuPool2dSettings pool_settings = GpuPool2dSettings(); ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, pool_attr, pool_settings); GpuOutput::create_op(sketch, ans_info, dst_info); @@ -168,29 +165,7 @@ public: input_shape, Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding( exclude_padding), - data_type, false); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DynamicFusionGpuPool2dMixedPrecisionValidationFixture - : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - void setup(TensorShape input_shape, - PoolingType pool_type, - Size2D pool_size, - Padding2D pad, - Size2D stride, - bool exclude_padding, - DataType data_type, - bool mixed_precision) - { - DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( - input_shape, - Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding( - exclude_padding), - data_type, mixed_precision); + data_type); } }; @@ -202,7 +177,7 @@ public: void setup(TensorShape input_shape, Pool2dAttributes pool_attr, DataType data_type) { DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( - input_shape, pool_attr, data_type, false); + input_shape, pool_attr, data_type); } }; diff --git 
a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h index edf0dff54b..08fffb305b 100644 --- a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h @@ -120,6 +120,8 @@ protected: GpuWorkloadSketch sketch{&context}; // Create sketch tensors + // Here, we use DataLayout::NCHW just for the test. However, the optimal data layout to + // be used with dynamic fusion is NHWC ITensorInfo *src_info = context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important ITensorInfo *dst_info = context.create_tensor_info();