author    Teresa Charlin <teresa.charlinreyes@arm.com>  2023-07-27 13:23:15 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>       2023-07-28 13:57:09 +0000
commit    ec0959b02b3d4cfcb82deb12f5e952524bcb71ba (patch)
tree      b79aea0a558c00d7924493bde3e7b3a695d25dbd
parent    40f6ea107f5dd829d69384ea71349771a804a1e1 (diff)
IVGCVSW-7925 Add REVERSE V2 to Support Library (SL)

* Fix typos in README

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I6e30536d353fae7a7828d9e02e7301ab8dd8c115
-rw-r--r--  shim/sl/README.md                |  4
-rw-r--r--  shim/sl/canonical/Converter.cpp  | 59
-rw-r--r--  shim/sl/canonical/Converter.hpp  |  2
3 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/shim/sl/README.md b/shim/sl/README.md
index 46509656f7..32f117a562 100644
--- a/shim/sl/README.md
+++ b/shim/sl/README.md
@@ -4,13 +4,13 @@ This directory contains the Arm NN Support Library for the Android Neural Networ
# Passing parameters to the support library runtime.
-The support library inherits it's parameters from the Arm NN Android Neural Networks driver. Parameters are passed to it through an environment variable, ARMNN_SL_OPTIONS. A full list of parameters are available ./canonical/DriverOptions.cpp.
+The support library inherits its parameters from the Arm NN Android Neural Networks driver. Parameters are passed to it through an environment variable, ARMNN_SL_OPTIONS. A full list of parameters is available in ./canonical/DriverOptions.cpp.
# Sample usage
## Running NeuralNetworksSupportLibraryTest
-This test suite takes as it's first argument the path to a shared object implementation of the support library. Any library dependencies should be resolvable through the LD_LIBRARY_PATH mechanism. Setting ARMNN_SL_OPTIONS will pass parameters to the Arm NN Support Library Neural Networks driver.
+This test suite takes as its first argument the path to a shared object implementation of the support library. Any library dependencies should be resolvable through the LD_LIBRARY_PATH mechanism. Setting ARMNN_SL_OPTIONS will pass parameters to the Arm NN Support Library Neural Networks driver.
Here we assume that Bash is the current shell and specify "-v" to enable verbose logging and "-c CpuAcc" to direct that the Neon(TM) accelerator be used.
~~~
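The README text above describes how options reach the support library: through the ARMNN_SL_OPTIONS environment variable, with the full option list living in ./canonical/DriverOptions.cpp (not part of this change). As a minimal sketch of that mechanism only, assuming nothing about the real DriverOptions parser, reading and whitespace-splitting such a variable might look like this:

~~~
// Illustrative only: read and tokenise an ARMNN_SL_OPTIONS-style variable.
// The actual parsing lives in ./canonical/DriverOptions.cpp and may differ.
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> ReadSlOptions()
{
    std::vector<std::string> tokens;
    if (const char* raw = std::getenv("ARMNN_SL_OPTIONS"))
    {
        std::istringstream stream(raw);
        std::string token;
        while (stream >> token)   // e.g. "-v -c CpuAcc" -> {"-v", "-c", "CpuAcc"}
        {
            tokens.push_back(token);
        }
    }
    return tokens;
}

int main()
{
    for (const auto& option : ReadSlOptions())
    {
        std::cout << option << "\n";
    }
    return 0;
}
~~~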
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index 790fad69dd..5b8c450733 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -150,6 +150,8 @@ bool Converter::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
         case OperationType::RESIZE_NEAREST_NEIGHBOR:
             return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
+        case OperationType::REVERSE:
+            return ConvertReverseV2(operation, model, data);
         case OperationType::RSQRT:
             return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
         case OperationType::SIN:
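The hunk above routes the new NNAPI REVERSE operation to Converter::ConvertReverseV2. For context, REVERSE (ReverseV2 in TensorFlow Lite terms) takes two inputs: the data tensor and a 1-D tensor listing the axes to reverse. A self-contained illustration of those semantics on a 2x3 matrix, reversing along axis 1 (an explanatory sketch, not driver code):

~~~
// Reverse along axis 1 (columns): output[r][c] = input[r][cols - 1 - c]
#include <array>
#include <cstddef>
#include <iostream>

int main()
{
    const std::array<std::array<int, 3>, 2> input = {{ {1, 2, 3}, {4, 5, 6} }};
    std::array<std::array<int, 3>, 2> output{};

    for (std::size_t r = 0; r < 2; ++r)
    {
        for (std::size_t c = 0; c < 3; ++c)
        {
            output[r][c] = input[r][3 - 1 - c];
        }
    }

    for (const auto& row : output)   // prints: 3 2 1 / 6 5 4
    {
        for (int v : row) { std::cout << v << ' '; }
        std::cout << '\n';
    }
    return 0;
}
~~~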
@@ -4789,6 +4791,63 @@ bool Converter::ConvertResize(const Operation& operation,
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
 }
+bool Converter::ConvertReverseV2(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertReverseV2()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReverseV2Supported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputInfo0,
+                                   inputInfo1,
+                                   outputInfo);
+    };
+
+    if (!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReverseV2Layer();
+    assert(layer != nullptr);
+    layer->SetBackendId(setBackend);
+    input0.Connect(layer->GetInputSlot(0));
+    input1.Connect(layer->GetInputSlot(1));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
 bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
 {
     VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
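The converter above validates backend support through IsReverseV2Supported and then wires both NNAPI inputs (data and axes) into a two-input ReverseV2 layer. A rough sketch of building the same layer directly with the public ArmNN graph API is shown below; the two-input AddReverseV2Layer call mirrors the diff, while the shapes, data types and constant axes tensor are purely illustrative assumptions:

~~~
// Sketch only: a standalone ReverseV2 graph built with the ArmNN public API.
// Tensor shapes, data types and the constant axes values are assumptions.
#include <armnn/ArmNN.hpp>
#include <cstdint>
#include <vector>

armnn::INetworkPtr BuildReverseV2Network()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    TensorInfo inputInfo(TensorShape({2, 3}), DataType::Float32);
    TensorInfo axesInfo(TensorShape({1}), DataType::Signed32);
    axesInfo.SetConstant(true);
    TensorInfo outputInfo(TensorShape({2, 3}), DataType::Float32);

    // Constant axes tensor selecting the dimension(s) to reverse; here axis 1.
    const std::vector<int32_t> axesData = {1};
    ConstTensor axes(axesInfo, axesData);

    IConnectableLayer* inputLayer  = network->AddInputLayer(0, "input");
    IConnectableLayer* axesLayer   = network->AddConstantLayer(axes, "axes");
    IConnectableLayer* reverse     = network->AddReverseV2Layer("reverseV2");
    IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");

    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
    axesLayer->GetOutputSlot(0).SetTensorInfo(axesInfo);
    reverse->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Slot 0 carries the data, slot 1 the axes, matching the converter above.
    inputLayer->GetOutputSlot(0).Connect(reverse->GetInputSlot(0));
    axesLayer->GetOutputSlot(0).Connect(reverse->GetInputSlot(1));
    reverse->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    return network;
}
~~~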
diff --git a/shim/sl/canonical/Converter.hpp b/shim/sl/canonical/Converter.hpp
index bf660b94cf..d19498d2d4 100644
--- a/shim/sl/canonical/Converter.hpp
+++ b/shim/sl/canonical/Converter.hpp
@@ -137,6 +137,8 @@ private:
                               ConversionData& data,
                               armnn::ResizeMethod resizeMethod);
+    static bool ConvertReverseV2(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
     static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);