author    Jim Flynn <jim.flynn@arm.com>   2019-12-11 16:59:01 +0000
committer Jim Flynn <jim.flynn@arm.com>   2019-12-11 16:59:01 +0000
commit    356bfec771858ed435874b525fd88da505380103 (patch)
tree      328d85fbe343c9ad92e2cef9921520b58a5bffc1
parent    26052fcf2f8c91f3479c9484354e88e8944d004d (diff)
download  armnn-356bfec771858ed435874b525fd88da505380103.tar.gz
IVGCVSW-4254 Patch Strided Slice CTS failures
Change-Id: Idc7e95f20b1fceb2135db4960877671c155b2f4b
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.cpp             14
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.hpp               2
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                          2
-rw-r--r--  src/backends/cl/workloads/ClStridedSliceWorkload.cpp       16
-rw-r--r--  src/backends/neon/workloads/NeonStridedSliceWorkload.cpp   16
5 files changed, 36 insertions, 14 deletions
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 3185ba00d3..3abadd7563 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -184,4 +184,18 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
return weightPermuted;
}
+int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
+{
+ int32_t reversedMask = 0;
+ for (unsigned int i = 0; i < boost::numeric_cast<unsigned int>(numDim); ++i)
+ {
+ // Check if bit set in mask for each dimension
+ int32_t bit = (mask & 1 << i) != 0;
+ // Increment the new mask with the bits reversed
+ reversedMask += (bit << std::max(numDim-(boost::numeric_cast<int>(i)+1), 0));
+ }
+
+ return reversedMask;
+}
+
} // namespace armnn
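
As context for the hunk above: the new ConvertMaskToACLFormat helper mirrors each per-dimension mask bit, because the Compute Library addresses tensor dimensions in the opposite order to Arm NN, so bit i of an Arm NN strided-slice mask has to become bit (numDim - 1 - i) before it is handed to ACL. The sketch below is a minimal, boost-free restatement of that bit reversal; the helper name ReverseMaskBits and the mask/dimension values are illustrative only and are not part of the patch.

// Minimal sketch of the bit reversal performed by ConvertMaskToACLFormat.
// The function name, mask value and dimension count are illustrative.
#include <cassert>
#include <cstdint>

int32_t ReverseMaskBits(int32_t mask, int32_t numDim)
{
    int32_t reversedMask = 0;
    for (int32_t i = 0; i < numDim; ++i)
    {
        // Bit i marks dimension i in Arm NN order; move it to the mirrored
        // position so it marks the same dimension in ACL order.
        int32_t bit = (mask >> i) & 1;
        reversedMask |= bit << (numDim - 1 - i);
    }
    return reversedMask;
}

int main()
{
    // For a 4-D tensor, a mask covering dimensions 0 and 2 (0b0101)
    // becomes 0b1010 once the dimension order is reversed for ACL.
    assert(ReverseMaskBits(0b0101, 4) == 0b1010);
    return 0;
}
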
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index ba69255183..b6d1b3b26f 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -173,6 +173,8 @@ void GatherTensorHandlePairs(const DescriptorType& descriptor,
}
}
+int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim);
+
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
const PermutationVector& permutationVector,
void* permuteBuffer);
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 625d2348be..2bbd77acdb 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -242,7 +242,7 @@ bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
return IsSupportedForDataTypeCl(reasonIfUnsupported,
output.GetDataType(),
&TrueFunc<>,
- &FalseFuncU8<>);
+ &TrueFunc<>);
}
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index e51fa34233..6b0a34d90e 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -11,7 +11,9 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadUtils.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
@@ -34,9 +36,10 @@ arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo& input,
std::tie(starts, ends, strides) = SetClStridedSliceData(descriptor.m_Begin, descriptor.m_End, descriptor.m_Stride);
- int32_t begin_mask = descriptor.m_BeginMask;
- int32_t end_mask = descriptor.m_EndMask;
- int32_t shrink_axis_mask = descriptor.m_ShrinkAxisMask;
+ auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions());
+ int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions);
+ int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions);
+ int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions);
return arm_compute::CLStridedSlice::validate(&aclInputInfo,
&aclOutputInfo,
@@ -65,9 +68,10 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor
m_Data.m_Parameters.m_End,
m_Data.m_Parameters.m_Stride);
- int32_t begin_mask = m_Data.m_Parameters.m_BeginMask;
- int32_t end_mask = m_Data.m_Parameters.m_EndMask;
- int32_t shrink_axis_mask = m_Data.m_Parameters.m_ShrinkAxisMask;
+ auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
+ int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions);
+ int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions);
+ int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions);
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
index 9c24728004..356c0aea83 100644
--- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
@@ -9,7 +9,7 @@
#include <neon/NeonTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-
+#include <backendsCommon/WorkloadUtils.hpp>
namespace armnn
{
@@ -29,9 +29,10 @@ arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo& input,
descriptor.m_End,
descriptor.m_Stride);
- int32_t begin_mask = descriptor.m_BeginMask;
- int32_t end_mask = descriptor.m_EndMask;
- int32_t shrink_axis_mask = descriptor.m_ShrinkAxisMask;
+ auto numDimensions = boost::numeric_cast<int>(input.GetNumDimensions());
+ int32_t begin_mask = ConvertMaskToACLFormat(descriptor.m_BeginMask, numDimensions);
+ int32_t end_mask = ConvertMaskToACLFormat(descriptor.m_EndMask, numDimensions);
+ int32_t shrink_axis_mask = ConvertMaskToACLFormat(descriptor.m_ShrinkAxisMask, numDimensions);
return arm_compute::NEStridedSlice::validate(&aclInput,
&aclOutput,
@@ -60,9 +61,10 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescri
m_Data.m_Parameters.m_End,
m_Data.m_Parameters.m_Stride);
- int32_t begin_mask = m_Data.m_Parameters.m_BeginMask;
- int32_t end_mask = m_Data.m_Parameters.m_EndMask;
- int32_t shrink_axis_mask = m_Data.m_Parameters.m_ShrinkAxisMask;
+ auto numDimensions = boost::numeric_cast<int>(info.m_InputTensorInfos[0].GetNumDimensions());
+ int32_t begin_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_BeginMask, numDimensions);
+ int32_t end_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_EndMask, numDimensions);
+ int32_t shrink_axis_mask = ConvertMaskToACLFormat(m_Data.m_Parameters.m_ShrinkAxisMask, numDimensions);
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
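
Both the CL and Neon hunks apply the same pattern: take the dimension count from the workload's input tensor, run each of the three strided-slice masks through ConvertMaskToACLFormat, and only then pass the values to CLStridedSlice / NEStridedSlice. The short usage sketch below follows that pattern with hypothetical mask values; it assumes linkage against the patched WorkloadUtils and is illustration only, not part of the patch.

// Hypothetical usage sketch; the mask and dimension values are illustrative
// and this snippet assumes linkage against the patched WorkloadUtils.cpp.
#include <cstdint>
#include <iostream>

#include <backendsCommon/WorkloadUtils.hpp> // declares armnn::ConvertMaskToACLFormat

int main()
{
    const int32_t numDimensions  = 4;     // e.g. a 4-D NHWC input
    const int32_t beginMask      = 0x1;   // dimension 0 in Arm NN order
    const int32_t shrinkAxisMask = 0x4;   // dimension 2 in Arm NN order

    // With the patch, dimension 0 is addressed by bit 3 and dimension 2 by
    // bit 1 on the ACL side; the raw Arm NN masks would address different
    // dimensions in ACL's reversed ordering.
    std::cout << armnn::ConvertMaskToACLFormat(beginMask,      numDimensions) << "\n"  // prints 8
              << armnn::ConvertMaskToACLFormat(shrinkAxisMask, numDimensions) << "\n"; // prints 2
    return 0;
}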