about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2023-03-08 10:08:20 +0000
committerKevin May <kevin.may@arm.com>2023-03-08 13:57:20 +0000
commit084cb4dcb9eca3eac3fc634f052ddb7d7fcc0bb4 (patch)
tree9cdbb9ca9855f9655ba1bbf97691b25a671c5821
parent0637bf38b24bba3a3d88f34ed956111a3abddda2 (diff)
downloadandroid-nn-driver-branches/android-nn-driver_23_02.tar.gz
IVGCVSW-7404 Out of bounds detectionv23.02branches/android-nn-driver_23_02
* Added test to ensure that all inputs and outputs do not go out of bounds.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ia97e85f71e46cd2203306243e4dcbc23e0f29ec1
-rw-r--r-- ArmnnPreparedModel.cpp     | 24
-rw-r--r-- ArmnnPreparedModel_1_2.cpp | 24
-rw-r--r-- ArmnnPreparedModel_1_3.cpp | 26
-rw-r--r-- Utils.cpp                  | 65
-rw-r--r-- Utils.hpp                  |  9
5 files changed, 136 insertions(+), 12 deletions(-)
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 41740435..d87f9f82 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -218,6 +218,7 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(
NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
+
// add the inputs and outputs with their data
try
{
@@ -225,11 +226,19 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(
for (unsigned int i = 0; i < request.inputs.size(); i++)
{
const auto& inputArg = request.inputs[i];
-
armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
// pInputTensors (of type InputTensors) is composed of a vector of ConstTensors.
// Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
inputTensorInfo.SetConstant();
+ auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+ inputTensorInfo,
+ inputArg,
+ "input");
+ if (result != V1_0::ErrorStatus::NONE)
+ {
+ return result;
+ }
+
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);
if (inputTensor.GetMemoryArea() == nullptr)
{
@@ -244,8 +253,17 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(
for (unsigned int i = 0; i < request.outputs.size(); i++)
{
const auto& outputArg = request.outputs[i];
-
const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+ auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+ outputTensorInfo,
+ outputArg,
+ "output");
+
+ if (result != V1_0::ErrorStatus::NONE)
+ {
+ return result;
+ }
+
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);
if (outputTensor.GetMemoryArea() == nullptr)
{
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index c54ee354..a401b30e 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -312,11 +312,20 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForIn
for (unsigned int i = 0; i < request.inputs.size(); i++)
{
const auto& inputArg = request.inputs[i];
-
armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
// inputs (of type InputTensors) is composed of a vector of ConstTensors.
// Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
inputTensorInfo.SetConstant();
+ auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+ inputTensorInfo,
+ inputArg,
+ "input");
+
+ if (result != V1_0::ErrorStatus::NONE)
+ {
+ return result;
+ }
+
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
if (inputTensor.GetMemoryArea() == nullptr)
@@ -342,8 +351,17 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForOu
for (unsigned int i = 0; i < request.outputs.size(); i++)
{
const auto& outputArg = request.outputs[i];
+ armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+ auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+ outputTensorInfo,
+ outputArg,
+ "output");
+
+ if (result != V1_0::ErrorStatus::NONE)
+ {
+ return result;
+ }
- const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
if (outputTensor.GetMemoryArea() == nullptr)
{
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index 20b49f5d..ceeb3c48 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Note: the ArmnnFencedExecutionCallback and code snippet in the executeFenced() function
@@ -510,11 +510,20 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForIn
for (unsigned int i = 0; i < request.inputs.size(); i++)
{
const auto& inputArg = request.inputs[i];
-
armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
// inputs (of type InputTensors) is composed of a vector of ConstTensors.
// Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
inputTensorInfo.SetConstant();
+ auto result = ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(request,
+ inputTensorInfo,
+ inputArg,
+ "input");
+
+ if (result != V1_3::ErrorStatus::NONE)
+ {
+ return result;
+ }
+
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
if (inputTensor.GetMemoryArea() == nullptr)
@@ -540,15 +549,24 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForOu
for (unsigned int i = 0; i < request.outputs.size(); i++)
{
const auto& outputArg = request.outputs[i];
-
armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+ auto result = ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(request,
+ outputTensorInfo,
+ outputArg,
+ "output");
+
+ if (result != V1_3::ErrorStatus::NONE)
+ {
+ return result;
+ }
+
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
+
if (outputTensor.GetMemoryArea() == nullptr)
{
ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
return V1_3::ErrorStatus::GENERAL_FAILURE;
}
-
const size_t outputSize = outputTensorInfo.GetNumBytes();
unsigned int count = 0;
diff --git a/Utils.cpp b/Utils.cpp
index 884bed00..13eb84d5 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -767,4 +767,67 @@ void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
#endif
}
}
+
+size_t GetSize(const V1_0::Request& request, const V1_0::RequestArgument& requestArgument)
+{
+ return request.pools[requestArgument.location.poolIndex].size();
+}
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+size_t GetSize(const V1_3::Request& request, const V1_0::RequestArgument& requestArgument)
+{
+ if (request.pools[requestArgument.location.poolIndex].getDiscriminator() ==
+ V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory)
+ {
+ return request.pools[requestArgument.location.poolIndex].hidlMemory().size();
+ }
+ else
+ {
+ return 0;
+ }
+}
+#endif
+
+template <typename ErrorStatus, typename Request>
+ErrorStatus ValidateRequestArgument(const Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString)
+{
+ if (requestArgument.location.poolIndex >= request.pools.size())
+ {
+ std::string err = fmt::format("Invalid {} pool at index {} the pool index is greater than the number "
+ "of available pools {}",
+ descString, requestArgument.location.poolIndex, request.pools.size());
+ ALOGE(err.c_str());
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ const size_t size = GetSize(request, requestArgument);
+ size_t totalLength = tensorInfo.GetNumBytes();
+
+ if (static_cast<size_t>(requestArgument.location.offset) + totalLength > size)
+ {
+ std::string err = fmt::format("Invalid {} pool at index {} the offset {} and length {} are greater "
+ "than the pool size {}", descString, requestArgument.location.poolIndex,
+ requestArgument.location.offset, totalLength, size);
+ ALOGE(err.c_str());
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return ErrorStatus::NONE;
+}
+
+template V1_0::ErrorStatus ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(
+ const V1_0::Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString);
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+template V1_3::ErrorStatus ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(
+ const V1_3::Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString);
+#endif
+
} // namespace armnn_driver
diff --git a/Utils.hpp b/Utils.hpp
index 6e733a26..81be984c 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -11,6 +11,8 @@
#include <NeuralNetworks.h>
#include <Utils.h>
+#include <fmt/format.h>
+
#include <vector>
#include <string>
#include <fstream>
@@ -194,4 +196,9 @@ inline V1_2::OutputShape ComputeShape(const armnn::TensorInfo& info)
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);
+template <typename ErrorStatus, typename Request>
+ErrorStatus ValidateRequestArgument(const Request& request,
+ const armnn::TensorInfo& tensorInfo,
+ const V1_0::RequestArgument& requestArgument,
+ std::string descString);
} // namespace armnn_driver