author    Mike Kelly <mike.kelly@arm.com>    2023-12-04 17:23:09 +0000
committer Nikhil Raj Arm <nikhil.raj@arm.com>    2023-12-05 16:58:10 +0000
commit    a9c3267d1a20e69a9cc0ae98b52958a6277e2f0d (patch)
tree      cf8ba49f42bcdb6d8360ca705b247bdd656925f8 /src/backends/neon/test/NeonTensorHandleTests.cpp
parent    a8337d7a1e9f5aa3ed380dd0f5a4cf7636360122 (diff)
download  armnn-a9c3267d1a20e69a9cc0ae98b52958a6277e2f0d.tar.gz
IVGCVSW-8159 Fixed issues building with NDK r26
* The compiler shipped with NDK r26 has stricter rules around certain warnings and deprecation notices.
* Fixed warnings for unqualified calls to 'std::move'
* Fixed an error where half values weren't being cast to a float when calling 'std::nan'
* Removed unnecessary subtensor unit tests for Neon

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4ceb46e55ff5f2a754452e3a43de2188d58bf927
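The diff below covers only the last item (removing the Neon subtensor tests). For context on the first two items, the following is a minimal, hypothetical C++ sketch of the kind of change involved; the Half type and the function names are illustrative assumptions, not code from this commit, and it uses std::isnan to show the explicit half-to-float cast that the commit message describes for 'std::nan'.

    // Illustrative sketch only; not part of this patch.
    #include <cmath>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for a 16-bit float type such as armnn::Half.
    struct Half
    {
        explicit operator float() const { return m_Value; }
        float m_Value = 0.0f;
    };

    std::vector<int> TakeOwnership(std::vector<int>& data)
    {
        // The clang shipped with NDK r26 flags an unqualified 'move(data)'
        // via -Wunqualified-std-cast-call, so the call is spelled std::move.
        return std::move(data);
    }

    bool IsNaN(Half h)
    {
        // With the stricter toolchain, half values need an explicit cast to
        // float before being passed to the std:: floating-point routines.
        return std::isnan(static_cast<float>(h));
    }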
Diffstat (limited to 'src/backends/neon/test/NeonTensorHandleTests.cpp')
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp  | 260
1 file changed, 0 insertions, 260 deletions
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index a94e4dd187..bc8ad5de5a 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -89,266 +89,6 @@ TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
CHECK(capabilities[0].m_Value);
}
-TEST_CASE("ConcatonXorYPaddingRequiredTest")
-{
- armnn::INetworkPtr net(armnn::INetwork::Create());
-
- // Set up tensor infos
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
- const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({2, 3, 4, 2}, armnn::DataType::Float32);
-
- armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
- descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
- descriptor.m_StrideX = descriptor.m_StrideY = 1;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-
- // Create the network
- armnn::IConnectableLayer* const input0Layer = net->AddInputLayer(0, "input_0");
- input0Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- armnn::IConnectableLayer* pooling2dLayer0 = net->AddPooling2dLayer(descriptor, "pooling2d_0");
- pooling2dLayer0->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
- input0Layer->GetOutputSlot(0).Connect(pooling2dLayer0->GetInputSlot(0));
-
- armnn::IConnectableLayer* const input1Layer = net->AddInputLayer(1, "input_1");
- input1Layer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- armnn::IConnectableLayer* pooling2dLayer1 = net->AddPooling2dLayer(descriptor, "pooling2d_1");
- pooling2dLayer1->GetOutputSlot(0).SetTensorInfo(intermediateInfo);
- input1Layer->GetOutputSlot(0).Connect(pooling2dLayer1->GetInputSlot(0));
-
- std::array<armnn::TensorShape, 2> concatInputShapes = { intermediateInfo.GetShape(), intermediateInfo.GetShape() };
- armnn::IConnectableLayer* const concatLayer = net->AddConcatLayer(armnn::CreateDescriptorForConcatenation(
- concatInputShapes.begin(), concatInputShapes.end(), 2), "concatenation");
- concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- pooling2dLayer0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
- pooling2dLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
-
- armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output");
- concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
- const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
-
- // Load graph into runtime
- armnn::NetworkId networkIdentifier;
- runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
-
- // now check the concat how many sub-tensors it is using..
- auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
- {
- if (subTensorHandle && subTensorHandle->GetParent())
- {
- return true;
- }
- return false;
- };
-
- unsigned int numberOfSubTensors = 0;
- for (auto&& layer : theGraph)
- {
- if(layer->GetType() == armnn::LayerType::Concat)
- {
- for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
- {
- const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
- if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
- {
- ++numberOfSubTensors;
- }
- }
- }
- }
- // sub-tensors should not be supported in this configuration
- ARMNN_ASSERT(numberOfSubTensors == 0);
-}
-
-TEST_CASE("SplitteronXorYPaddingRequiredTest")
-{
- using namespace armnn;
-
- unsigned int splitAxis = 2;
- unsigned int numSplit = 2;
-
- const TensorShape& inputShape = { 1, 1, 4, 4 };
- const armnn::TensorInfo intermediateInfo = armnn::TensorInfo({ 1, 1, 2, 4 }, armnn::DataType::Float32);
- const std::vector<TensorShape> outputShapes{{ 1, 1, 2, 4 },
- { 1, 1, 2, 4 }};
-
- const float qScale = 1.0f;
- const int32_t qOffset = 0;
-
- // Creates structures for input & output.
- std::vector<float> inputData{
- 9.0f, 27.0f, 18.0f, 36.0f,
- 18.0f, 9.0f, 18.0f, 9.0f,
- 27.0f, 18.0f, 9.0f, 27.0f,
- 9.0f, 27.0f, 9.0f, 18.0f,
- };
-
- std::vector<float> expectedOutput0{
- 7.0f, 11.0f, 13.0f, 9.0f,
- 7.0f, 11.0f, 13.0f, 9.0f
- };
-
- std::vector<float> expectedOutput1{
- 9.0f, 11.0f, 12.0f, 7.0f,
- 9.0f, 11.0f, 12.0f, 7.0f
- };
-
- // Builds up the structure of the network.
- INetworkPtr net(INetwork::Create());
-
- TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32, qScale, qOffset);
-
- // Pooling
- armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
- descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
- descriptor.m_StrideX = descriptor.m_StrideY = 1;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
- descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-
- // Splitter
- std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
-
- // Add current input shape to splitterDimSizes
- for (unsigned int i = 0; i < inputShape.GetNumDimensions(); ++i)
- {
- splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
- }
-
- if (splitterDimSizes[splitAxis] % numSplit != 0)
- {
- throw ParseException("Number of splits must evenly divide the dimension");
- }
-
- splitterDimSizes[splitAxis] /= numSplit;
-
- SplitterDescriptor splitDesc(numSplit, inputShape.GetNumDimensions());
-
- for (unsigned int g = 0; g < numSplit; ++g)
- {
- // Set the size of the views.
- for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
- {
- splitDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
- }
- splitDesc.SetViewOriginCoord(g, splitAxis, splitterDimSizes[splitAxis] * g);
- }
-
- IConnectableLayer* input = net->AddInputLayer(0, "input");
- IConnectableLayer* pooling2d0 = net->AddPooling2dLayer(descriptor, "pooling2d_0");
- IConnectableLayer* pooling2d1 = net->AddPooling2dLayer(descriptor, "pooling2d_1");
- IConnectableLayer* splitter = net->AddSplitterLayer(splitDesc, "splitter");
-
- // Connections
- Connect(input, splitter, inputTensorInfo, 0, 0);
- Connect(splitter, pooling2d0, intermediateInfo, 0, 0);
- Connect(splitter, pooling2d1, intermediateInfo, 1, 0);
-
- std::vector<IConnectableLayer*> pooling2dLayers{pooling2d0, pooling2d1};
-
- for (unsigned int i = 0; i < outputShapes.size(); ++i)
- {
- TensorInfo outputTensorInfo(outputShapes[i], armnn::DataType::Float32, qScale, qOffset);
- IConnectableLayer* output = net->AddOutputLayer(armnn::numeric_cast<LayerBindingId>(i));
- Connect(pooling2dLayers[i], output, outputTensorInfo, 0, 0);
- }
-
- std::map<int, std::vector<float>> inputTensorData = {{ 0,inputData }};
- std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedOutput0 }, { 1, expectedOutput1 }};
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
- const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
-
- // Load graph into runtime
- armnn::NetworkId networkIdentifier;
- runtime->LoadNetwork(networkIdentifier, std::move(optimizedNet));
-
- // now check the concat how many sub-tensors it is using..
- auto TraceSubTensorHandleAncestry = [](armnn::ITensorHandle* const subTensorHandle)
- {
- if (subTensorHandle && subTensorHandle->GetParent())
- {
- return true;
- }
- return false;
- };
-
- for (auto&& layer : theGraph)
- {
- if(layer->GetType() == armnn::LayerType::Pooling2d)
- {
- unsigned int numberOfSubTensors = 0;
- for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
- {
- const armnn::OutputSlot* slot = layer->GetInputSlot(i).GetConnectedOutputSlot();
- if (TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData()))
- {
- ++numberOfSubTensors;
- }
- }
- // sub-tensors should be supported in this configuration
- ARMNN_ASSERT(numberOfSubTensors == 0);
- }
- }
-
- InputTensors inputTensors;
- inputTensors.reserve(inputTensorData.size());
- for (auto&& it : inputTensorData)
- {
- TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
- inputTensorInfo.SetConstant(true);
- inputTensors.push_back({it.first,
- ConstTensor(inputTensorInfo, it.second.data())});
- }
- OutputTensors outputTensors;
- outputTensors.reserve(expectedOutputData.size());
- std::map<int, std::vector<float>> outputStorage;
- for (auto&& it : expectedOutputData)
- {
- std::vector<float> out(it.second.size());
- outputStorage.emplace(it.first, out);
- outputTensors.push_back({it.first,
- Tensor(runtime->GetOutputTensorInfo(networkIdentifier, it.first),
- outputStorage.at(it.first).data())});
- }
-
- // Does the inference.
- runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
-
- // Checks the results.
- float tolerance = 0.000001f;
- for (auto&& it : expectedOutputData)
- {
- std::vector<float> out = outputStorage.at(it.first);
- for (unsigned int i = 0; i < out.size(); ++i)
- {
- CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
- "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
-
- }
- }
-}
-
TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(