diff options
author | SiCongLi <sicong.li@arm.com> | 2021-10-29 15:05:49 +0100 |
---|---|---|
committer | SiCong Li <sicong.li@arm.com> | 2021-11-01 14:29:51 +0000 |
commit | eb8bd81a625f0f87080dbde55b434362ad57324a (patch) | |
tree | fda1de0843be17266388d0d137908f392a7f694e /src | |
parent | 1af5416917268692fcd4b34b1d7ffebd3a2aea8a (diff) | |
download | ComputeLibrary-eb8bd81a625f0f87080dbde55b434362ad57324a.tar.gz |
Fix dst "widening" validation
* Auto-initialize the dst tensor before checking for PostOp shape
compliance so that we catch the invalid case of "widening" dst tensor
shape
* Rework post op validate test cases to be more readable
Partially resolves: COMPMID-4435
Change-Id: I79943994182942f962e4d59a7fa0d6f017ae9ac7
Signed-off-by: SiCongLi <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6548
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r-- | src/core/CL/CLUtils.cpp | 10 | ||||
-rw-r--r-- | src/core/experimental/PostOp.h | 10 | ||||
-rw-r--r-- | src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp | 4 |
3 files changed, 16 insertions, 8 deletions
diff --git a/src/core/CL/CLUtils.cpp b/src/core/CL/CLUtils.cpp index 1da970e705..748b0f55a1 100644 --- a/src/core/CL/CLUtils.cpp +++ b/src/core/CL/CLUtils.cpp @@ -85,16 +85,24 @@ PostOpCLKernelUtils::PostOpCLKernelUtils(const Config &supported_config) bool PostOpCLKernelUtils::are_post_op_shapes_compliant(const ITensorInfo *dst, const experimental::PostOpList<ITensorInfo *> &post_ops) { - // All post ops must be elementwise and must not alter the shape of the original dst tensor after broadcasting for(const auto &op : post_ops.get_list()) { for(const auto &tensor : op->arguments()) { const TensorShape &out_shape = TensorShape::broadcast_shape(dst->tensor_shape(), (*tensor)->tensor_shape()); + // All post ops must be elementwise and must not alter the shape of the original dst tensor after broadcasting if(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0)) { return false; } + // NOTE: Kernel limitation: currently only the following broadcasting types are supported: + // 1. Post op arg is scalar, broadcast in both X and Y + // 2. 
Post op arg is of shape: Y=1, X=N, broadcast only in Y + // This means this case: Post op arg is of shape: Y=M, X=1, broadcast only in X, is NOT supported + if(dst->dimension(0) > 1 && dst->dimension(1) > 1 && (*tensor)->dimension(0) == 1 && (*tensor)->dimension(1) > 1) + { + return false; + } } } return true; diff --git a/src/core/experimental/PostOp.h b/src/core/experimental/PostOp.h index 64414d2050..7d62bd95e1 100644 --- a/src/core/experimental/PostOp.h +++ b/src/core/experimental/PostOp.h @@ -79,9 +79,9 @@ template <typename TensorRelatedT> struct PostOpEltwiseAdd : public IPostOp<TensorRelatedT> { public: - PostOpEltwiseAdd(TensorRelatedT addend, int prev_op_arg_pos, ConvertPolicy policy) + PostOpEltwiseAdd(TensorRelatedT addend, int prev_dst_pos, ConvertPolicy policy) : _addend{ addend }, - _prev_op_arg_pos{ prev_op_arg_pos }, + _prev_dst_pos{ prev_dst_pos }, _policy{ policy } { } @@ -93,7 +93,7 @@ public: PostOpEltwiseAdd &operator=(PostOpEltwiseAdd &&) = default; int prev_dst_pos() const override { - return _prev_op_arg_pos; + return _prev_dst_pos; } PostOpType type() const override { @@ -112,7 +112,7 @@ public: return std::make_unique<PostOpEltwiseAdd<TensorRelatedT>>(*this); } TensorRelatedT _addend; - int _prev_op_arg_pos; + int _prev_dst_pos; ConvertPolicy _policy; }; @@ -135,7 +135,7 @@ PostOpList<ToTensorT> transform_post_op_list_arguments(const PostOpList<FromTens case PostOpType::Eltwise_Add: { const auto _post_op = utils::cast::polymorphic_downcast<const PostOpEltwiseAdd<FromTensorT> *>(post_op.get()); - transformed_post_ops.template push_back_op<PostOpEltwiseAdd<ToTensorT>>(transform_arg(_post_op->_addend), _post_op->_prev_op_arg_pos, _post_op->_policy); + transformed_post_ops.template push_back_op<PostOpEltwiseAdd<ToTensorT>>(transform_arg(_post_op->_addend), _post_op->_prev_dst_pos, _post_op->_policy); break; } default: diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp 
b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp index 4b28e2badc..8ee72d3f03 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp @@ -182,11 +182,11 @@ void ClGemmMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi { ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info)); - // dst tensor auto initialization if not yet initialized auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info))); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info)); + auto padding_info = get_padding_info({ src0, src1, src2, dst }); _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0; _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device()); |