diff options
author | Francesco Petrogalli <francesco.petrogalli@arm.com> | 2022-06-30 10:22:01 +0000 |
---|---|---|
committer | Francesco Petrogalli <francesco.petrogalli@arm.com> | 2022-07-19 09:26:27 +0000 |
commit | 553f6953fe3bdfad53c11c25f305a16d79d83b24 (patch) | |
tree | 73642b948b79662096f593458c6138d2f7f48ec6 /src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp | |
parent | 99c46475daf277aa53e6747f9e41209f418fed33 (diff) | |
download | ComputeLibrary-553f6953fe3bdfad53c11c25f305a16d79d83b24.tar.gz |
[ONCPUML-951] Variable weight support for Convolution.
API changes for NEGEMMConvolutionLayer and CpuGemmConv2d
Built with:
scons neon=1 opencl=0 os=linux arch=armv8.2-a multi_isa=1 \
build=native -j32 Werror=false validation_tests=1 build_dir=opt \
standalone=1 asserts=1 experimental_fixed_format_kernels=1 .
Tested with:
./build/opt/tests/arm_compute_validation
Hardware where the test executable was run:
Neoverse N1
Test coverage:
* NEGEMMConvolutionLayer, CpuGemmConv2d
* NHWC (the only one supported by the fixed-format kernels)
* F16, F32
* Shapes: RunSmall
Change-Id: I4fd3e495a7cbf61210ea02d37440ba9652934e99
Signed-off-by: Francesco Petrogalli <francesco.petrogalli@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7632
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp index 50fc5bdb8a..58e4861bc0 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp @@ -33,21 +33,21 @@ #include "kernels/a32_sgemm_8x6.hpp" -#ifdef ENABLE_FIXED_FORMAT_KERNELS +#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS #include "kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp" #include "kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp" #include "kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp" -#endif // ENABLE_FIXED_FORMAT_KERNELS +#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS #include "kernels/a64_hybrid_bf16fp32_dot_6x16.hpp" #include "kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp" #include "kernels/a64_interleaved_bf16fp32_dot_8x12.hpp" #include "kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp" #include "kernels/a64_sgemm_8x12.hpp" -#ifdef ENABLE_FIXED_FORMAT_KERNELS +#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS #include "kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp" #include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp" -#endif // ENABLE_FIXED_FORMAT_KERNELS +#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS #include "kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp" #include "kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp" #include "kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp" @@ -89,7 +89,7 @@ GemmImplementation<bfloat16, float>::with_estimate( [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); }, [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, float>(args); } ), -#ifdef ENABLE_FIXED_FORMAT_KERNELS +#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS GemmImplementation<bfloat16, float>::with_estimate( GemmMethod::GEMM_INTERLEAVED, "sve_ffinterleaved_bf16fp32_mmla_8x3VL", @@ -106,7 +106,7 @@ GemmImplementation<bfloat16, float>::with_estimate( [](const GemmArgs 
&args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_bf16fp32_mmla_6x4VL, bfloat16, float>::estimate_cycles<bfloat16>(args); }, [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_bf16fp32_mmla_6x4VL, bfloat16, float>(args); } ), -#endif // ENABLE_FIXED_FORMAT_KERNELS +#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS #endif // ARM_COMPUTE_ENABLE_SVE GemmImplementation<bfloat16, float>::with_estimate( GemmMethod::GEMM_HYBRID, @@ -136,7 +136,7 @@ GemmImplementation<bfloat16, float>::with_estimate( [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); }, [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, float>(args); } ), -#ifdef ENABLE_FIXED_FORMAT_KERNELS +#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS GemmImplementation<bfloat16, float>::with_estimate( GemmMethod::GEMM_INTERLEAVED, "a64_ffinterleaved_bf16fp32_mmla_8x12", @@ -161,7 +161,7 @@ GemmImplementation<bfloat16, float>::with_estimate( [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_dot_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); }, [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_dot_8x12, bfloat16, float>(args); } ), -#endif // ENABLE_FIXED_FORMAT_KERNELS +#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS GemmImplementation<bfloat16, float>::with_estimate( GemmMethod::GEMM_INTERLEAVED, "a64_sgemm_8x12", @@ -197,7 +197,7 @@ const GemmImplementation<bfloat16, float> *gemm_implementation_list<bfloat16, fl /* Explicitly instantiate the external functions for these types. 
*/ template UniqueGemmCommon<bfloat16, float> gemm<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &); -template bool has_opt_gemm<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &); +template bool has_opt_gemm<bfloat16, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &); template KernelDescription get_gemm_method<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &); template std::vector<KernelDescription> get_compatible_kernels<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &); |