diff options
author | Francis Murtagh <francis.murtagh@arm.com> | 2019-11-05 14:26:23 +0000 |
---|---|---|
committer | Francis Murtagh <francis.murtagh@arm.com> | 2019-11-06 10:15:51 +0000 |
commit | ec33a91ec1557b78b2d01975ec4c5eaf24aa058c (patch) | |
tree | d6e26d6b59421e00dcceed4715e7a76d7a2ebc4f /src/backends/cl | |
parent | 3201eea0565ce2bb0418d1936fec71bdeb14c084 (diff) | |
download | armnn-ec33a91ec1557b78b2d01975ec4c5eaf24aa058c.tar.gz |
IVGCVSW-4038 Convert Strided_Slice Shrink_Axis_Mask Parameter to ACL format
* Add conversion method to reverse bits in Shrink_Axis_Mask
* Add Unit tests for Neon, CL and Reference backends
* Fix supportedness of the Constant layer, which was causing an error
in DeepSpeech Uint8
* Also convert the Begin_Mask and End_Mask
Change-Id: I448b083c3463558e8fb5204923ab554cd43264ba
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Diffstat (limited to 'src/backends/cl')
-rw-r--r-- | src/backends/cl/ClLayerSupport.cpp | 2 | ||||
-rw-r--r-- | src/backends/cl/test/ClLayerTests.cpp | 29 | ||||
-rw-r--r-- | src/backends/cl/workloads/ClStridedSliceWorkload.cpp | 16 |
3 files changed, 40 insertions, 7 deletions
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index be565a523a..de9d1c5bcb 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -275,7 +275,7 @@ bool ClLayerSupport::IsConstantSupported(const TensorInfo& output, return IsSupportedForDataTypeCl(reasonIfUnsupported, output.GetDataType(), &TrueFunc<>, - &FalseFuncU8<>); + &TrueFunc<>); } bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index 909ebc73c2..0fc8ece498 100644 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -568,6 +568,21 @@ ARMNN_AUTO_TEST_CASE(StridedSlice4dReverseFloat32, StridedSlice4dReverseFloat32T ARMNN_AUTO_TEST_CASE(StridedSliceSimpleStrideFloat32, StridedSliceSimpleStrideFloat32Test) ARMNN_AUTO_TEST_CASE(StridedSliceSimpleRangeMaskFloat32, StridedSliceSimpleRangeMaskFloat32Test) ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskFloat32, StridedSliceShrinkAxisMaskFloat32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskCTSFloat32, StridedSliceShrinkAxisMaskCTSFloat32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0Dim3Float32, + StridedSliceShrinkAxisMaskBitPosition0Dim3Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0Float32, StridedSliceShrinkAxisMaskBitPosition0Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition1Float32, StridedSliceShrinkAxisMaskBitPosition1Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition2Float32, StridedSliceShrinkAxisMaskBitPosition2Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition3Float32, StridedSliceShrinkAxisMaskBitPosition3Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And1Float32, + StridedSliceShrinkAxisMaskBitPosition0And1Float32Test) 
+ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And2Float32, + StridedSliceShrinkAxisMaskBitPosition0And2Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And3Float32, + StridedSliceShrinkAxisMaskBitPosition0And3Float32Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And1And3Float32, + StridedSliceShrinkAxisMaskBitPosition0And1And3Float32Test) ARMNN_AUTO_TEST_CASE(StridedSlice3dFloat32, StridedSlice3dFloat32Test) ARMNN_AUTO_TEST_CASE(StridedSlice3dReverseFloat32, StridedSlice3dReverseFloat32Test) ARMNN_AUTO_TEST_CASE(StridedSlice2dFloat32, StridedSlice2dFloat32Test) @@ -578,6 +593,20 @@ ARMNN_AUTO_TEST_CASE(StridedSlice4dReverseUint8, StridedSlice4dReverseUint8Test) ARMNN_AUTO_TEST_CASE(StridedSliceSimpleStrideUint8, StridedSliceSimpleStrideUint8Test) ARMNN_AUTO_TEST_CASE(StridedSliceSimpleRangeMaskUint8, StridedSliceSimpleRangeMaskUint8Test) ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskUint8, StridedSliceShrinkAxisMaskUint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8, + StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0Uint8, StridedSliceShrinkAxisMaskBitPosition0Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition1Uint8, StridedSliceShrinkAxisMaskBitPosition1Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition2Uint8, StridedSliceShrinkAxisMaskBitPosition2Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition3Uint8, StridedSliceShrinkAxisMaskBitPosition3Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And1Uint8, + StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And2Uint8, + StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test) +ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And3Uint8, + StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test) 
+ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8, + StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8Test) ARMNN_AUTO_TEST_CASE(StridedSlice3dUint8, StridedSlice3dUint8Test) ARMNN_AUTO_TEST_CASE(StridedSlice3dReverseUint8, StridedSlice3dReverseUint8Test) ARMNN_AUTO_TEST_CASE(StridedSlice2dUint8, StridedSlice2dUint8Test) diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp index e51fa34233..6b0a34d90e 100644 --- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp +++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp @@ -11,7 +11,9 @@ #include <aclCommon/ArmComputeTensorUtils.hpp> #include <backendsCommon/CpuTensorHandle.hpp> +#include <backendsCommon/WorkloadUtils.hpp> +#include <boost/numeric/conversion/cast.hpp> #include <cl/ClLayerSupport.hpp> #include <cl/ClTensorHandle.hpp> #include <cl/ClLayerSupport.hpp> @@ -34,9 +36,10 @@ arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo& input, std::tie(starts, ends, strides) = SetClStridedSliceData(descriptor.m_Begin, descriptor.m_End, descriptor.m_Stride); - int32_t begin_mask = descriptor.m_BeginMask; - int32_t end_mask = descriptor.m_EndMask; - int32_t shrink_axis_mask = descriptor.m_ShrinkAxisMask; + auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions()); + int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions); + int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions); + int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions); return arm_compute::CLStridedSlice::validate(&aclInputInfo, &aclOutputInfo, @@ -65,9 +68,10 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor m_Data.m_Parameters.m_End, m_Data.m_Parameters.m_Stride); - int32_t begin_mask = m_Data.m_Parameters.m_BeginMask; - int32_t end_mask = m_Data.m_Parameters.m_EndMask; - int32_t shrink_axis_mask = 
m_Data.m_Parameters.m_ShrinkAxisMask; + auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions()); + int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions); + int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions); + int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions); arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); input.info()->set_data_layout(aclDataLayout); |