Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/Workload.hpp                  |  16
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp               |   3
-rw-r--r--  src/backends/backendsCommon/WorkloadInfo.hpp               |  17
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.cpp   |   5
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.hpp   | 121
5 files changed, 132 insertions(+), 30 deletions(-)
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index f7895a6f1d..8ec09f98b6 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -7,7 +7,7 @@
#include "WorkloadData.hpp"
#include "WorkloadInfo.hpp"
-#include <armnn/Types.hpp>
+#include <armnn/backends/IWorkload.hpp>
#include <Profiling.hpp>
#include <ProfilingService.hpp>
@@ -16,20 +16,6 @@
namespace armnn
{
-/// Workload interface to enqueue a layer computation.
-class IWorkload
-{
-public:
- virtual ~IWorkload() {}
-
- virtual void PostAllocationConfigure() = 0;
- virtual void Execute() const = 0;
-
- virtual profiling::ProfilingGuid GetGuid() const = 0;
-
- virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {}
-};
-
// NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template
// in the various workload factories.
// There should never be an instantiation of a NullWorkload.
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index c5fcf15c3b..46681e9def 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -31,7 +31,8 @@ struct QueueDescriptor
std::vector<ITensorHandle*> m_Outputs;
void ValidateInputsOutputs(const std::string& descName,
- unsigned int numExpectedIn, unsigned int numExpectedOut) const;
+ unsigned int numExpectedIn,
+ unsigned int numExpectedOut) const;
protected:
diff --git a/src/backends/backendsCommon/WorkloadInfo.hpp b/src/backends/backendsCommon/WorkloadInfo.hpp
index 304bc0bf06..cac147c500 100644
--- a/src/backends/backendsCommon/WorkloadInfo.hpp
+++ b/src/backends/backendsCommon/WorkloadInfo.hpp
@@ -2,17 +2,8 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-namespace armnn
-{
-
-/// Contains information about inputs and outputs to a layer.
-/// This is needed at construction of workloads, but are not stored.
-struct WorkloadInfo
-{
- std::vector<TensorInfo> m_InputTensorInfos;
- std::vector<TensorInfo> m_OutputTensorInfos;
-};
-
-} //namespace armnn
+// This file is deprecated and will be removed soon.
+// Please use the new header in armnn/backends instead.
+// Including this file now simply forwards to the armnn/backends header.
+#include <armnn/backends/WorkloadInfo.hpp>
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.cpp b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
index 40e063d8c4..bb1a5cda6c 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.cpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
@@ -71,4 +71,9 @@ ARMNN_SIMPLE_TEST_CASE(CreateReferenceDynamicBackend, CreateReferenceDynamicBack
#endif
+#if defined(SAMPLE_DYNAMIC_BACKEND_ENABLED)
+ARMNN_SIMPLE_TEST_CASE(CreateSampleDynamicBackend, CreateSampleDynamicBackendTestImpl);
+ARMNN_SIMPLE_TEST_CASE(SampleDynamicBackendEndToEnd, SampleDynamicBackendEndToEndTestImpl);
+#endif
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 4238ef6f7d..1276776a4d 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -19,7 +19,6 @@
#include <string>
#include <memory>
-#include <string>
#include <boost/test/unit_test.hpp>
#include <boost/filesystem.hpp>
@@ -1438,3 +1437,123 @@ void CreateReferenceDynamicBackendTestImpl()
}
#endif
+
+#if defined(SAMPLE_DYNAMIC_BACKEND_ENABLED)
+void CreateSampleDynamicBackendTestImpl()
+{
+ using namespace armnn;
+
+ // Using the path override in CreationOptions to load the sample dynamic backend
+ IRuntime::CreationOptions creationOptions;
+ IRuntimePtr runtime = IRuntime::Create(creationOptions);
+
+ const BackendRegistry& backendRegistry = BackendRegistryInstance();
+ BOOST_TEST(backendRegistry.Size() >= 1);
+
+ BackendIdSet backendIds = backendRegistry.GetBackendIds();
+ BOOST_TEST((backendIds.find("SampleDynamic") != backendIds.end()));
+
+ const DeviceSpec& deviceSpec = *boost::polymorphic_downcast<const DeviceSpec*>(&runtime->GetDeviceSpec());
+ BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
+ BOOST_TEST(supportedBackendIds.size() >= 1);
+ BOOST_TEST((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
+
+ // Get the factory function
+ auto sampleDynamicBackendFactoryFunction = backendRegistry.GetFactory("SampleDynamic");
+ BOOST_TEST((sampleDynamicBackendFactoryFunction != nullptr));
+
+ // Use the factory function to create an instance of the dynamic backend
+ IBackendInternalUniquePtr sampleDynamicBackend = sampleDynamicBackendFactoryFunction();
+ BOOST_TEST((sampleDynamicBackend != nullptr));
+ BOOST_TEST((sampleDynamicBackend->GetId() == "SampleDynamic"));
+
+ // Test the backend instance by querying the layer support
+ IBackendInternal::ILayerSupportSharedPtr sampleLayerSupport = sampleDynamicBackend->GetLayerSupport();
+ BOOST_TEST((sampleLayerSupport != nullptr));
+
+ TensorShape inputShape { 1, 16, 16, 16 };
+ TensorShape outputShape{ 1, 16, 16, 16 };
+ TensorShape weightShape{ 16, 1, 1, 16 };
+ TensorInfo inputInfo (inputShape, DataType::Float32);
+ TensorInfo outputInfo(outputShape, DataType::Float32);
+ TensorInfo weightInfo(weightShape, DataType::Float32);
+ Convolution2dDescriptor convolution2dDescriptor;
+ bool sampleConvolution2dSupported =
+ sampleLayerSupport->IsConvolution2dSupported(inputInfo,
+ outputInfo,
+ convolution2dDescriptor,
+ weightInfo,
+ EmptyOptional());
+ BOOST_TEST(!sampleConvolution2dSupported);
+
+ // Test the backend instance by creating a workload
+ IBackendInternal::IWorkloadFactoryPtr sampleWorkloadFactory = sampleDynamicBackend->CreateWorkloadFactory();
+ BOOST_TEST((sampleWorkloadFactory != nullptr));
+
+ // Create dummy settings for the workload
+ AdditionQueueDescriptor additionQueueDescriptor;
+ WorkloadInfo workloadInfo
+ {
+ { inputInfo, inputInfo },
+ { outputInfo }
+ };
+
+ // Create an addition workload
+ auto workload = sampleWorkloadFactory->CreateAddition(additionQueueDescriptor, workloadInfo);
+ BOOST_TEST((workload != nullptr));
+}
+
+void SampleDynamicBackendEndToEndTestImpl()
+{
+ using namespace armnn;
+ using namespace boost::filesystem;
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input0 = net->AddInputLayer(0);
+ IConnectableLayer* input1 = net->AddInputLayer(1);
+ IConnectableLayer* add = net->AddAdditionLayer();
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+ input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+ add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
+ input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // Optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, {"SampleDynamic"}, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ std::vector<float> input0Data{ 5.0f, 3.0f };
+ std::vector<float> input1Data{ 10.0f, 8.0f };
+ std::vector<float> expectedOutputData{ 15.0f, 11.0f };
+ std::vector<float> outputData(2);
+
+ InputTensors inputTensors
+ {
+ {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())},
+ {1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input1Data.data())}
+ };
+ OutputTensors outputTensors
+ {
+ {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // Does the inference.
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Checks the results.
+ BOOST_TEST(outputData == expectedOutputData);
+}
+#endif
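
For reference, a minimal sketch of a workload written against the relocated public headers. This is illustrative only and not part of the change: it assumes the moved armnn/backends/IWorkload.hpp and armnn/backends/WorkloadInfo.hpp keep the interface shown in the removed blocks above, and NoOpWorkload is a hypothetical class name.

// Illustrative sketch only; NoOpWorkload is hypothetical, and the interface is
// assumed to match the IWorkload/WorkloadInfo declarations removed from
// backendsCommon in this change.
#include <armnn/backends/IWorkload.hpp>
#include <armnn/backends/WorkloadInfo.hpp>

class NoOpWorkload : public armnn::IWorkload
{
public:
    explicit NoOpWorkload(const armnn::WorkloadInfo& info)
        : m_Info(info)
    {}

    // Called after tensor handles have been allocated; nothing to configure here.
    void PostAllocationConfigure() override {}

    // A real backend would run the layer computation here.
    void Execute() const override {}

    armnn::profiling::ProfilingGuid GetGuid() const override
    {
        return armnn::profiling::ProfilingGuid(0);
    }

private:
    armnn::WorkloadInfo m_Info; // input/output TensorInfos, as shown above
};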