diff options
author | Mike Kelly <mike.kelly@arm.com> | 2023-05-05 15:35:18 +0100 |
---|---|---|
committer | Mike Kelly <mike.kelly@arm.com> | 2023-05-05 16:48:51 +0100 |
commit | 04f71205e6fc9d2b2bab386fba1b07240c642115 (patch) | |
tree | 1c6e955640d602b1b7875f1f02b004001ac69367 | |
parent | 26654cb71db2b1e163527f52c3198d9434bb0e37 (diff) | |
download | armnn-04f71205e6fc9d2b2bab386fba1b07240c642115.tar.gz |
MLCE-1050 Error handling Slice operators
* If the dimension Size[n] in a Slice is -1 then it should be treated as
"InputShape[n] - Begin[n]" but the Delegate simply cast the Size to
uint and treated it as 4294967295.
* Added the layer name that includes the node index to the Slice to aid
debugging.
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I45fa88b24982c3c97f48d0dc05cf7d9bb6db4074
-rw-r--r-- | delegate/classic/src/Slice.hpp | 62 | ||||
-rw-r--r-- | delegate/opaque/src/Slice.hpp | 61 | ||||
-rw-r--r-- | delegate/test/SliceTest.cpp | 53 |
3 files changed, 141 insertions, 35 deletions
diff --git a/delegate/classic/src/Slice.hpp b/delegate/classic/src/Slice.hpp index f19e3327e4..a586e024d1 100644 --- a/delegate/classic/src/Slice.hpp +++ b/delegate/classic/src/Slice.hpp @@ -11,6 +11,7 @@ #include <tensorflow/lite/c/builtin_op_data.h> #include <tensorflow/lite/c/common.h> #include <tensorflow/lite/minimal_logging.h> +#include <fmt/format.h> namespace armnnDelegate { @@ -41,15 +42,15 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, // We save the begin and size tensors in our descriptor. Therefore we have to read those values from inputs int inputRank = tfLiteInputs[0]->dims->size; - auto ReadInt32Input = [&](int inputIndex, std::vector<uint32_t>& outputData) -> TfLiteStatus + auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData, const char* name) -> TfLiteStatus { if (tfLiteInputs[inputIndex]->type != kTfLiteInt32) { TF_LITE_MAYBE_KERNEL_LOG( tfLiteContext, - "TfLiteArmnnDelegate: The Begin- and Size-Tensors of the Slice operation need to " + "TfLiteArmnnDelegate: The %s Tensor of the Slice operation needs to " "be of type int32. Operator: #%d node #%d: ", - sliceOperatorCode, nodeIndex); + name, sliceOperatorCode, nodeIndex); return kTfLiteError; } int rank = tfLiteInputs[inputIndex]->dims->size; @@ -57,9 +58,9 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, { TF_LITE_MAYBE_KERNEL_LOG( tfLiteContext, - "TfLiteArmnnDelegate: The Begin- and Size-Tensors of the Slice operation need to " + "TfLiteArmnnDelegate: The %s Tensor of the Slice operation needs to " "be a 1D-Tensor. 
Operator: #%d node #%d: ", - sliceOperatorCode, nodeIndex); + name, sliceOperatorCode, nodeIndex); return kTfLiteError; } int numValues = tfLiteInputs[inputIndex]->dims->data[0]; @@ -67,23 +68,53 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, { TF_LITE_MAYBE_KERNEL_LOG( tfLiteContext, - "TfLiteArmnnDelegate: The number of values in the Begin- and Size-Tensors of the " - "Slice operation need to be equal to the rank of the Input-Tensor. Operator: #%d node #%d: ", - sliceOperatorCode, nodeIndex); + "TfLiteArmnnDelegate: The number of values in the %s Tensor of the " + "Slice operation needs to be equal to the rank of the Input Tensor. Operator: #%d node #%d: ", + name, sliceOperatorCode, nodeIndex); return kTfLiteError; } // return tensor data - auto* tensorDataPtr = tflite::GetTensorData<uint32_t>(tfLiteInputs[inputIndex]); - outputData.assign(tensorDataPtr, tensorDataPtr+numValues); + auto* tensorDataPtr = tflite::GetTensorData<int32_t>(tfLiteInputs[inputIndex]); + outputData.assign(tensorDataPtr, tensorDataPtr + numValues); return kTfLiteOk; }; - std::vector<uint32_t> begin; - if (ReadInt32Input(1, begin) != kTfLiteOk) + std::vector<int32_t> signedBegin; + if (ReadInt32Input(1, signedBegin, "Begin") != kTfLiteOk) + { return kTfLiteError; - std::vector<uint32_t> size; - if (ReadInt32Input(2, size) != kTfLiteOk) + } + + std::vector<int32_t> signedSize; + if (ReadInt32Input(2, signedSize, "Size") != kTfLiteOk) + { return kTfLiteError; + } + std::vector<uint32_t> begin({ signedBegin.begin(), signedBegin.end() }); + std::vector<uint32_t> size(signedSize.size()); + + for (unsigned int i = 0; i < signedSize.size(); ++i) + { + int signedValue = signedSize[i]; + if (signedValue < -1 || signedValue > tfLiteInputs[0]->dims->data[i] - signedBegin[i]) + { + TF_LITE_MAYBE_KERNEL_LOG( + tfLiteContext, + "TfLiteArmnnDelegate: Invalid value for Size. 
Size must be in range [-1, inputDimSize - begin] " + "[-1, %d] inclusive but was %d Operator: #%d node #%d: ", + tfLiteInputs[0]->dims->data[i] - signedBegin[i], signedValue, sliceOperatorCode, + nodeIndex); + return kTfLiteError; + } + if (signedValue == -1) + { + size[i] = tfLiteInputs[0]->dims->data[i] - signedBegin[i]; + } + else + { + size[i] = static_cast<uint32_t>(signedValue); + } + } // Write all data to the descriptor armnn::SliceDescriptor descriptor(begin, size); @@ -118,9 +149,10 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, validateFunc(outputTensorInfo, isSupported); return isSupported ? kTfLiteOk : kTfLiteError; } + auto layerName = fmt::format("Slice:{}", nodeIndex); // Add a Slice layer - armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor); + armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str()); layer->SetBackendId(setBackend); ARMNN_ASSERT(layer != nullptr); diff --git a/delegate/opaque/src/Slice.hpp b/delegate/opaque/src/Slice.hpp index 2064b2e7e4..e39e4afcec 100644 --- a/delegate/opaque/src/Slice.hpp +++ b/delegate/opaque/src/Slice.hpp @@ -6,6 +6,7 @@ #pragma once #include <OpaqueDelegateUtils.hpp> +#include <fmt/format.h> namespace armnnOpaqueDelegate { @@ -49,15 +50,15 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, // We save the begin and size tensors in our descriptor. 
Therefore we have to read those values from inputs unsigned int inputRank = inputTensorInfo.GetNumDimensions(); - auto ReadInt32Input = [&](int inputIndex, std::vector<uint32_t>& outputData) -> TfLiteStatus + auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData, const char* name) -> TfLiteStatus { if (TfLiteOpaqueTensorType(tfLiteInputTensors[inputIndex]) != kTfLiteInt32) { TF_LITE_OPAQUE_MAYBE_KERNEL_LOG( tfLiteContext, - "TfLiteArmnnOpaqueDelegate: The Begin- and Size-Tensors of the Slice operation need to " + "TfLiteArmnnOpaqueDelegate: The %s Tensor of the Slice operation needs to " "be of type int32. Operator: #%d node #%d: ", - tfLiteSliceOperatorCode, nodeIndex); + name, tfLiteSliceOperatorCode, nodeIndex); return kTfLiteError; } uint32_t rank = TfLiteOpaqueTensorNumDims(tfLiteInputTensors[inputIndex]); @@ -65,9 +66,9 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, { TF_LITE_OPAQUE_MAYBE_KERNEL_LOG( tfLiteContext, - "TfLiteArmnnOpaqueDelegate: The Begin- and Size-Tensors of the Slice operation need to " + "TfLiteArmnnOpaqueDelegate: The %s Tensor of the Slice operation needs to " "be a 1D-Tensor. Operator: #%d node #%d: ", - tfLiteSliceOperatorCode, nodeIndex); + name, tfLiteSliceOperatorCode, nodeIndex); return kTfLiteError; } uint32_t numValues = TfLiteOpaqueTensorDim(tfLiteInputTensors[inputIndex], 0); @@ -75,23 +76,54 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, { TF_LITE_OPAQUE_MAYBE_KERNEL_LOG( tfLiteContext, - "TfLiteArmnnOpaqueDelegate: The number of values in the Begin- and Size-Tensors of the " - "Slice operation need to be equal to the rank of the Input-Tensor. Operator: #%d node #%d: ", - tfLiteSliceOperatorCode, nodeIndex); + "TfLiteArmnnOpaqueDelegate: The number of values in the %s Tensor of the " + "Slice operation needs to be equal to the rank of the Input Tensor. 
Operator: #%d node #%d: ", + name, tfLiteSliceOperatorCode, nodeIndex); return kTfLiteError; } // return tensor data - auto* tensorDataPtr = static_cast<uint32_t*>(TfLiteOpaqueTensorData(tfLiteInputTensors[inputIndex])); + auto* tensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteInputTensors[inputIndex])); outputData.assign(tensorDataPtr, tensorDataPtr + numValues); return kTfLiteOk; }; - std::vector<uint32_t> begin; - if (ReadInt32Input(1, begin) != kTfLiteOk) + std::vector<int32_t> signedBegin; + if (ReadInt32Input(1, signedBegin, "Begin") != kTfLiteOk) + { return kTfLiteError; - std::vector<uint32_t> size; - if (ReadInt32Input(2, size) != kTfLiteOk) + } + + std::vector<int32_t> signedSize; + if (ReadInt32Input(2, signedSize, "Size") != kTfLiteOk) + { return kTfLiteError; + } + + std::vector<uint32_t> begin({ signedBegin.begin(), signedBegin.end() }); + std::vector<uint32_t> size(signedSize.size()); + + for (unsigned int i = 0; i < signedSize.size(); ++i) + { + int signedValue = signedSize[i]; + if (signedValue < -1 || signedValue > TfLiteOpaqueTensorDim(tfLiteInputTensors[0], i) - signedBegin[i]) + { + TF_LITE_OPAQUE_MAYBE_KERNEL_LOG( + tfLiteContext, + "TfLiteArmnnDelegate: Invalid value for Size. Size must be in range [-1, inputDimSize - begin] " + "[-1, %d] inclusive but was %d Operator: #%d node #%d: ", + TfLiteOpaqueTensorDim(tfLiteInputTensors[0], i) - signedBegin[i], signedValue, + tfLiteSliceOperatorCode, nodeIndex); + return kTfLiteError; + } + if (signedValue == -1) + { + size[i] = TfLiteOpaqueTensorDim(tfLiteInputTensors[0], i) - signedBegin[i]; + } + else + { + size[i] = static_cast<uint32_t>(signedValue); + } + } // Write all data to the descriptor armnn::SliceDescriptor descriptor(begin, size); @@ -137,9 +169,10 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData, validateFunc(outputTensorInfo, isSupported); return isSupported ? 
kTfLiteOk : kTfLiteError; } + auto layerName = fmt::format("Slice:{}", nodeIndex); // Add a Slice layer - armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor); + armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str()); layer->SetBackendId(setBackend); ARMNN_ASSERT(layer != nullptr); diff --git a/delegate/test/SliceTest.cpp b/delegate/test/SliceTest.cpp index 88a70de03f..9e54f735f8 100644 --- a/delegate/test/SliceTest.cpp +++ b/delegate/test/SliceTest.cpp @@ -41,6 +41,33 @@ void SliceFixtureSimpleTest(std::vector<armnn::BackendId>& backends) outputShape); } +void SliceFixtureSizeTest(std::vector<armnn::BackendId>& backends) +{ + std::vector<int32_t> inputShape { 3, 2, 3 }; + std::vector<int32_t> outputShape { 2, 1, 3 }; + std::vector<int32_t> beginShape { 3 }; + std::vector<int32_t> sizeShape { 3 }; + + std::vector<int32_t> beginData { 1, 0, 0 }; + std::vector<int32_t> sizeData { 2, 1, -1 }; + std::vector<float> inputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, + 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f, + 5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f }; + std::vector<float> outputData { 3.0f, 3.0f, 3.0f, + 5.0f, 5.0f, 5.0f }; + + SliceTestImpl<float>( + backends, + inputData, + outputData, + beginData, + sizeData, + inputShape, + beginShape, + sizeShape, + outputShape); +} + TEST_SUITE("Slice_CpuRefTests") { @@ -50,32 +77,46 @@ TEST_CASE ("Slice_Simple_CpuRef_Test") SliceFixtureSimpleTest(backends); } -} // Slice_CpuRefTests TestSuite - +TEST_CASE ("Slice_Size_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; + SliceFixtureSizeTest(backends); +} +} // Slice_CpuRefTests TestSuite TEST_SUITE("Slice_CpuAccTests") { TEST_CASE ("Slice_Simple_CpuAcc_Test") { - std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc}; SliceFixtureSimpleTest(backends); } -} // Slice_CpuAccTests TestSuite - +TEST_CASE 
("Slice_Size_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc}; + SliceFixtureSizeTest(backends); +} +} // Slice_CpuAccTests TestSuite TEST_SUITE("StridedSlice_GpuAccTests") { TEST_CASE ("Slice_Simple_GpuAcc_Test") { - std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; SliceFixtureSimpleTest(backends); } +TEST_CASE ("Slice_Size_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; + SliceFixtureSizeTest(backends); +} + } // Slice_GpuAccTests TestSuite } // namespace armnnDelegate
\ No newline at end of file |