From 867eba59ffd2276086a14f7b2632b390c94392d3 Mon Sep 17 00:00:00 2001 From: Narumol Prangnawarat Date: Mon, 3 Feb 2020 12:29:56 +0000 Subject: IVGCVSW-4399 Create Sample Dynamic backend * Move IWorkload and WorkloadInfo to include/armnn/backends * Add simple sample dynamic backend with addition workload * Add sample example to run dynamic backend * Unit tests Signed-off-by: Narumol Prangnawarat Change-Id: I0753ce35b8e8a6223a1471388b49246d82438a44 --- cmake/GlobalConfig.cmake | 4 + include/armnn/backends/CMakeLists.txt | 4 +- include/armnn/backends/IWorkload.hpp | 26 ++++ include/armnn/backends/WorkloadInfo.hpp | 22 ++++ samples/CMakeLists.txt | 5 + samples/DynamicSample.cpp | 80 ++++++++++++ src/backends/backendsCommon/Workload.hpp | 16 +-- src/backends/backendsCommon/WorkloadData.hpp | 3 +- src/backends/backendsCommon/WorkloadInfo.hpp | 17 +-- .../backendsCommon/test/DynamicBackendTests.cpp | 5 + .../backendsCommon/test/DynamicBackendTests.hpp | 121 +++++++++++++++++- src/dynamic/sample/CMakeLists.txt | 34 +++++ .../sample/SampleDynamicAdditionWorkload.cpp | 54 ++++++++ .../sample/SampleDynamicAdditionWorkload.hpp | 21 ++++ src/dynamic/sample/SampleDynamicBackend.cpp | 91 ++++++++++++++ src/dynamic/sample/SampleDynamicBackend.hpp | 15 +++ src/dynamic/sample/SampleDynamicLayerSupport.cpp | 51 ++++++++ src/dynamic/sample/SampleDynamicLayerSupport.hpp | 28 +++++ .../sample/SampleDynamicWorkloadFactory.cpp | 75 +++++++++++ .../sample/SampleDynamicWorkloadFactory.hpp | 62 ++++++++++ src/dynamic/sample/SampleMemoryManager.cpp | 95 ++++++++++++++ src/dynamic/sample/SampleMemoryManager.hpp | 59 +++++++++ src/dynamic/sample/SampleTensorHandle.cpp | 137 +++++++++++++++++++++ src/dynamic/sample/SampleTensorHandle.hpp | 78 ++++++++++++ 24 files changed, 1072 insertions(+), 31 deletions(-) create mode 100644 include/armnn/backends/IWorkload.hpp create mode 100644 include/armnn/backends/WorkloadInfo.hpp create mode 100644 samples/DynamicSample.cpp create mode 100644 src/dynamic/sample/CMakeLists.txt create mode 100644 src/dynamic/sample/SampleDynamicAdditionWorkload.cpp create mode 100644 src/dynamic/sample/SampleDynamicAdditionWorkload.hpp create mode 100644 src/dynamic/sample/SampleDynamicBackend.cpp create mode 100644 src/dynamic/sample/SampleDynamicBackend.hpp create mode 100644 src/dynamic/sample/SampleDynamicLayerSupport.cpp create mode 100644 src/dynamic/sample/SampleDynamicLayerSupport.hpp create mode 100644 src/dynamic/sample/SampleDynamicWorkloadFactory.cpp create mode 100644 src/dynamic/sample/SampleDynamicWorkloadFactory.hpp create mode 100644 src/dynamic/sample/SampleMemoryManager.cpp create mode 100644 src/dynamic/sample/SampleMemoryManager.hpp create mode 100644 src/dynamic/sample/SampleTensorHandle.cpp create mode 100644 src/dynamic/sample/SampleTensorHandle.hpp diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake index ccf0eccd29..aa2ebc745d 100644 --- a/cmake/GlobalConfig.cmake +++ b/cmake/GlobalConfig.cmake @@ -307,6 +307,10 @@ if(DYNAMIC_BACKEND_PATHS) add_definitions(-DARMNN_DYNAMIC_BACKEND_ENABLED) endif() +if(SAMPLE_DYNAMIC_BACKEND) + add_definitions(-DSAMPLE_DYNAMIC_BACKEND_ENABLED) +endif() + # Streamline annotate if(PROFILING_BACKEND_STREAMLINE) include_directories("${GATOR_ROOT}/annotate") diff --git a/include/armnn/backends/CMakeLists.txt b/include/armnn/backends/CMakeLists.txt index 90a022aad7..94e757fe8a 100644 --- a/include/armnn/backends/CMakeLists.txt +++ b/include/armnn/backends/CMakeLists.txt @@ -8,10 +8,12 @@ list(APPEND armnnBackendsAPI_sources 
    DynamicBackend.hpp
     IBackendInternal.hpp
     IBackendContext.hpp
-    ITensorHandleFactory.hpp
     IMemoryManager.hpp
     ITensorHandle.hpp
+    ITensorHandleFactory.hpp
+    IWorkload.hpp
     OptimizationViews.hpp
+    WorkloadInfo.hpp
     profiling/IBackendProfiling.hpp
     profiling/IBackendProfilingContext.hpp
 )
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
new file mode 100644
index 0000000000..0bd8d2db75
--- /dev/null
+++ b/include/armnn/backends/IWorkload.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include
+
+namespace armnn
+{
+
+/// Workload interface to enqueue a layer computation.
+class IWorkload {
+public:
+    virtual ~IWorkload() {}
+
+    virtual void PostAllocationConfigure() = 0;
+
+    virtual void Execute() const = 0;
+
+    virtual profiling::ProfilingGuid GetGuid() const = 0;
+
+    virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {}
+};
+
+} //namespace armnn
diff --git a/include/armnn/backends/WorkloadInfo.hpp b/include/armnn/backends/WorkloadInfo.hpp
new file mode 100644
index 0000000000..edf3581791
--- /dev/null
+++ b/include/armnn/backends/WorkloadInfo.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include
+
+#include
+
+namespace armnn
+{
+
+/// Contains information about inputs and outputs to a layer.
+/// This is needed at construction of workloads, but is not stored.
+struct WorkloadInfo
+{
+    std::vector m_InputTensorInfos;
+    std::vector m_OutputTensorInfos;
+};
+
+} //namespace armnn
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index 640d5cd705..5505de0bec 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -2,3 +2,8 @@ if(BUILD_SAMPLE_APP AND ARMNNREF)
     add_executable(SimpleSample SimpleSample.cpp)
     target_link_libraries(SimpleSample armnn ${CMAKE_THREAD_LIBS_INIT})
 endif()
+
+if(SAMPLE_DYNAMIC_BACKEND)
+    add_executable(DynamicSample DynamicSample.cpp)
+    target_link_libraries(DynamicSample armnn ${CMAKE_THREAD_LIBS_INIT})
+endif()
diff --git a/samples/DynamicSample.cpp b/samples/DynamicSample.cpp
new file mode 100644
index 0000000000..3abe12f309
--- /dev/null
+++ b/samples/DynamicSample.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include
+#include
+#include
+#include
+
+#include
+
+/// A simple example of using the ArmNN SDK API. In this sample, two two-element Float32 input tensors are added
+/// element-wise by an addition layer running on the SampleDynamic backend, and the result is printed to stdout.
+int main() +{ + using namespace armnn; + + // Construct ArmNN network + armnn::NetworkId networkIdentifier; + INetworkPtr myNetwork = INetwork::Create(); + + IConnectableLayer* input0 = myNetwork->AddInputLayer(0); + IConnectableLayer* input1 = myNetwork->AddInputLayer(1); + IConnectableLayer* add = myNetwork->AddAdditionLayer(); + IConnectableLayer* output = myNetwork->AddOutputLayer(0); + + input0->GetOutputSlot(0).Connect(add->GetInputSlot(0)); + input1->GetOutputSlot(0).Connect(add->GetInputSlot(1)); + add->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32); + input0->GetOutputSlot(0).SetTensorInfo(tensorInfo); + input1->GetOutputSlot(0).SetTensorInfo(tensorInfo); + add->GetOutputSlot(0).SetTensorInfo(tensorInfo); + + // Create ArmNN runtime + IRuntime::CreationOptions options; // default options + armnn::IRuntimePtr run(armnn::IRuntime::Create(options)); + + // Optimise ArmNN network + armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec()); + if (!optNet) + { + // This shouldn't happen for this simple sample, with reference backend. + // But in general usage Optimize could fail if the hardware at runtime cannot + // support the model that has been provided. + std::cerr << "Error: Failed to optimise the input network." << std::endl; + return 1; + } + + // Load graph into runtime + run->LoadNetwork(networkIdentifier, std::move(optNet)); + + // input data + std::vector input0Data + { + 5.0f, 3.0f + }; + std::vector input1Data + { + 10.0f, 8.0f + }; + std::vector outputData(2); + + InputTensors inputTensors + { + {0,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())}, + {1,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())} + }; + OutputTensors outputTensors + { + {0,armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())} + }; + + // Execute network + run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors); + + std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl; + return 0; +} diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp index f7895a6f1d..8ec09f98b6 100644 --- a/src/backends/backendsCommon/Workload.hpp +++ b/src/backends/backendsCommon/Workload.hpp @@ -7,7 +7,7 @@ #include "WorkloadData.hpp" #include "WorkloadInfo.hpp" -#include +#include #include #include @@ -16,20 +16,6 @@ namespace armnn { -/// Workload interface to enqueue a layer computation. -class IWorkload -{ -public: - virtual ~IWorkload() {} - - virtual void PostAllocationConfigure() = 0; - virtual void Execute() const = 0; - - virtual profiling::ProfilingGuid GetGuid() const = 0; - - virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {} -}; - // NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template // in the various workload factories. // There should never be an instantiation of a NullWorkload. 
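[Editor's illustration, not part of the patch] With IWorkload now exposed under include/armnn/backends, an out-of-tree backend can implement the interface directly against the public headers. The fragment below is a minimal sketch only: the class name, the no-op bodies and the zero GUID are illustrative assumptions, and it presumes the header pulls in armnn/Types.hpp for ProfilingGuid and DebugCallbackFunction. The BaseWorkload helper used by the sample backend later in this patch is the more typical starting point.

// Illustrative only: a minimal IWorkload implementation built against the
// relocated public header.
#include <armnn/backends/IWorkload.hpp>

class MinimalWorkload : public armnn::IWorkload
{
public:
    void PostAllocationConfigure() override {}   // nothing to configure in this sketch

    void Execute() const override {}             // a real workload would run the layer computation here

    armnn::profiling::ProfilingGuid GetGuid() const override
    {
        return armnn::profiling::ProfilingGuid(0); // assumed placeholder GUID for illustration
    }
};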
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index c5fcf15c3b..46681e9def 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -31,7 +31,8 @@ struct QueueDescriptor
     std::vector m_Outputs;
 
     void ValidateInputsOutputs(const std::string& descName,
-                               unsigned int numExpectedIn, unsigned int numExpectedOut) const;
+                               unsigned int numExpectedIn,
+                               unsigned int numExpectedOut) const;
 
 protected:
 
diff --git a/src/backends/backendsCommon/WorkloadInfo.hpp b/src/backends/backendsCommon/WorkloadInfo.hpp
index 304bc0bf06..cac147c500 100644
--- a/src/backends/backendsCommon/WorkloadInfo.hpp
+++ b/src/backends/backendsCommon/WorkloadInfo.hpp
@@ -2,17 +2,8 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#pragma once
-namespace armnn
-{
-
-/// Contains information about inputs and outputs to a layer.
-/// This is needed at construction of workloads, but are not stored.
-struct WorkloadInfo
-{
-    std::vector m_InputTensorInfos;
-    std::vector m_OutputTensorInfos;
-};
-
-} //namespace armnn
+// This file is deprecated and will be removed soon.
+// Please use the new header in armnn/backends instead.
+// For now it simply forwards to the new armnn/backends header.
+#include
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.cpp b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
index 40e063d8c4..bb1a5cda6c 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.cpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
@@ -71,4 +71,9 @@ ARMNN_SIMPLE_TEST_CASE(CreateReferenceDynamicBackend, CreateReferenceDynamicBack
 
 #endif
 
+#if defined(SAMPLE_DYNAMIC_BACKEND_ENABLED)
+ARMNN_SIMPLE_TEST_CASE(CreateSampleDynamicBackend, CreateSampleDynamicBackendTestImpl);
+ARMNN_SIMPLE_TEST_CASE(SampleDynamicBackendEndToEnd, SampleDynamicBackendEndToEndTestImpl);
+#endif
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 4238ef6f7d..1276776a4d 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -19,7 +19,6 @@
 #include
 #include
-#include
 #include
 #include
 
@@ -1438,3 +1437,123 @@ void CreateReferenceDynamicBackendTestImpl()
 }
 
 #endif
+
+#if defined(SAMPLE_DYNAMIC_BACKEND_ENABLED)
+void CreateSampleDynamicBackendTestImpl()
+{
+    using namespace armnn;
+
+    // Create a runtime; constructing it loads and registers the available dynamic backends, including SampleDynamic
+    IRuntime::CreationOptions creationOptions;
+    IRuntimePtr runtime = IRuntime::Create(creationOptions);
+
+    const BackendRegistry& backendRegistry = BackendRegistryInstance();
+    BOOST_TEST(backendRegistry.Size() >= 1);
+
+    BackendIdSet backendIds = backendRegistry.GetBackendIds();
+    BOOST_TEST((backendIds.find("SampleDynamic") != backendIds.end()));
+
+    const DeviceSpec& deviceSpec = *boost::polymorphic_downcast(&runtime->GetDeviceSpec());
+    BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
+    BOOST_TEST(supportedBackendIds.size() >= 1);
+    BOOST_TEST((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
+
+    // Get the factory function
+    auto sampleDynamicBackendFactoryFunction = backendRegistry.GetFactory("SampleDynamic");
+    BOOST_TEST((sampleDynamicBackendFactoryFunction != nullptr));
+
+    // Use the factory function to create an instance of the dynamic backend
+    IBackendInternalUniquePtr
sampleDynamicBackend = sampleDynamicBackendFactoryFunction(); + BOOST_TEST((sampleDynamicBackend != nullptr)); + BOOST_TEST((sampleDynamicBackend->GetId() == "SampleDynamic")); + + // Test the backend instance by querying the layer support + IBackendInternal::ILayerSupportSharedPtr sampleLayerSupport = sampleDynamicBackend->GetLayerSupport(); + BOOST_TEST((sampleLayerSupport != nullptr)); + + TensorShape inputShape { 1, 16, 16, 16 }; + TensorShape outputShape{ 1, 16, 16, 16 }; + TensorShape weightShape{ 16, 1, 1, 16 }; + TensorInfo inputInfo (inputShape, DataType::Float32); + TensorInfo outputInfo(outputShape, DataType::Float32); + TensorInfo weightInfo(weightShape, DataType::Float32); + Convolution2dDescriptor convolution2dDescriptor; + bool sampleConvolution2dSupported = + sampleLayerSupport->IsConvolution2dSupported(inputInfo, + outputInfo, + convolution2dDescriptor, + weightInfo, + EmptyOptional()); + BOOST_TEST(!sampleConvolution2dSupported); + + // Test the backend instance by creating a workload + IBackendInternal::IWorkloadFactoryPtr sampleWorkloadFactory = sampleDynamicBackend->CreateWorkloadFactory(); + BOOST_TEST((sampleWorkloadFactory != nullptr)); + + // Create dummy settings for the workload + AdditionQueueDescriptor additionQueueDescriptor; + WorkloadInfo workloadInfo + { + { inputInfo, inputInfo }, + { outputInfo } + }; + + // Create a addition workload + auto workload = sampleWorkloadFactory->CreateAddition(additionQueueDescriptor, workloadInfo); + BOOST_TEST((workload != nullptr)); +} + +void SampleDynamicBackendEndToEndTestImpl() +{ + using namespace armnn; + using namespace boost::filesystem; + // Create runtime in which test will run + IRuntime::CreationOptions options; + IRuntimePtr runtime(IRuntime::Create(options)); + + // Builds up the structure of the network. + INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input0 = net->AddInputLayer(0); + IConnectableLayer* input1 = net->AddInputLayer(1); + IConnectableLayer* add = net->AddAdditionLayer(); + IConnectableLayer* output = net->AddOutputLayer(0); + + input0->GetOutputSlot(0).Connect(add->GetInputSlot(0)); + input1->GetOutputSlot(0).Connect(add->GetInputSlot(1)); + add->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32); + input0->GetOutputSlot(0).SetTensorInfo(tensorInfo); + input1->GetOutputSlot(0).SetTensorInfo(tensorInfo); + add->GetOutputSlot(0).SetTensorInfo(tensorInfo); + + // optimize the network + IOptimizedNetworkPtr optNet = Optimize(*net, {"SampleDynamic"}, runtime->GetDeviceSpec()); + + // Loads it into the runtime. + NetworkId netId; + runtime->LoadNetwork(netId, std::move(optNet)); + + std::vector input0Data{ 5.0f, 3.0f }; + std::vector input1Data{ 10.0f, 8.0f }; + std::vector expectedOutputData{ 15.0f, 11.0f }; + std::vector outputData(2); + + InputTensors inputTensors + { + {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())}, + {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())} + }; + OutputTensors outputTensors + { + {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())} + }; + + // Does the inference. + runtime->EnqueueWorkload(netId, inputTensors, outputTensors); + + // Checks the results. 
+ BOOST_TEST(outputData == expectedOutputData); +} +#endif diff --git a/src/dynamic/sample/CMakeLists.txt b/src/dynamic/sample/CMakeLists.txt new file mode 100644 index 0000000000..aeb870c32d --- /dev/null +++ b/src/dynamic/sample/CMakeLists.txt @@ -0,0 +1,34 @@ +# +# Copyright © 2020 Arm Ltd. All rights reserved. +# SPDX-License-Identifier: MIT +# + +cmake_minimum_required (VERSION 3.0.2) +project(sample-dynamic) + +set(CMAKE_POSITION_INDEPENDENT_CODE ON) + +list(APPEND armnnSampleDynamicBackend_sources + SampleDynamicAdditionWorkload.cpp + SampleDynamicAdditionWorkload.hpp + SampleDynamicBackend.cpp + SampleDynamicBackend.hpp + SampleDynamicLayerSupport.cpp + SampleDynamicLayerSupport.hpp + SampleDynamicWorkloadFactory.cpp + SampleDynamicWorkloadFactory.hpp + SampleMemoryManager.cpp + SampleMemoryManager.hpp + SampleTensorHandle.cpp + SampleTensorHandle.hpp +) + +add_library(Arm_SampleDynamic_backend MODULE ${armnnSampleDynamicBackend_sources}) + +target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../include) +target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../third-party) +target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/armnn) +target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/armnnUtils) +target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/backends) +target_include_directories(Arm_SampleDynamic_backend PRIVATE ${PROJECT_SOURCE_DIR}/../../../src/profiling) + diff --git a/src/dynamic/sample/SampleDynamicAdditionWorkload.cpp b/src/dynamic/sample/SampleDynamicAdditionWorkload.cpp new file mode 100644 index 0000000000..0fa57a7e07 --- /dev/null +++ b/src/dynamic/sample/SampleDynamicAdditionWorkload.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+
+#include
+
+#include "SampleDynamicAdditionWorkload.hpp"
+#include "SampleTensorHandle.hpp"
+
+namespace armnn
+{
+
+inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
+{
+    // The sample backend uses SampleTensorHandles for all of its inputs and outputs
+    const SampleTensorHandle* sampleTensorHandle =
+        static_cast(tensorHandle);
+    return sampleTensorHandle->GetTensorInfo();
+}
+
+const float* GetInputTensorData(unsigned int idx, const AdditionQueueDescriptor& data)
+{
+    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
+    return reinterpret_cast(tensorHandle->Map());
+}
+
+float* GetOutputTensorData(unsigned int idx, const AdditionQueueDescriptor& data)
+{
+    ITensorHandle* tensorHandle = data.m_Outputs[idx];
+    return reinterpret_cast(tensorHandle->Map());
+}
+
+SampleDynamicAdditionWorkload::SampleDynamicAdditionWorkload(const AdditionQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info)
+    : BaseWorkload(descriptor, info)
+{}
+
+void SampleDynamicAdditionWorkload::Execute() const
+{
+    const TensorInfo& info = GetTensorInfo(m_Data.m_Inputs[0]);
+    unsigned int num = info.GetNumElements();
+
+    const float* inputData0 = GetInputTensorData(0, m_Data);
+    const float* inputData1 = GetInputTensorData(1, m_Data);
+    float* outputData = GetOutputTensorData(0, m_Data);
+
+    for (unsigned int i = 0; i < num; ++i)
+    {
+        outputData[i] = inputData0[i] + inputData1[i];
+    }
+}
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicAdditionWorkload.hpp b/src/dynamic/sample/SampleDynamicAdditionWorkload.hpp
new file mode 100644
index 0000000000..8362588c39
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicAdditionWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include
+#include
+
+namespace armnn
+{
+
+class SampleDynamicAdditionWorkload : public BaseWorkload
+{
+public:
+    SampleDynamicAdditionWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/dynamic/sample/SampleDynamicBackend.cpp b/src/dynamic/sample/SampleDynamicBackend.cpp
new file mode 100644
index 0000000000..1863c1c98c
--- /dev/null
+++ b/src/dynamic/sample/SampleDynamicBackend.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT +// + +#include "SampleDynamicBackend.hpp" +#include "SampleDynamicLayerSupport.hpp" +#include "SampleDynamicWorkloadFactory.hpp" +#include "SampleMemoryManager.hpp" + +#include +#include + +namespace armnn +{ + +constexpr const char * SampleDynamicBackendId() { return "SampleDynamic"; } + +class SampleDynamicBackend : public IBackendInternal +{ +public: + SampleDynamicBackend() = default; + ~SampleDynamicBackend() = default; + + static const BackendId& GetIdStatic() + { + static const BackendId s_Id{SampleDynamicBackendId()}; + return s_Id; + } + + const BackendId& GetId() const override { return GetIdStatic(); } + + IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override + { + return std::make_unique(); + } + + IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory( + const IMemoryManagerSharedPtr& memoryManager) const override + { + return std::make_unique(); + } + + IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override + { + static ILayerSupportSharedPtr layerSupport{new SampleDynamicLayerSupport}; + return layerSupport; + } + + IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override + { + return IBackendContextPtr{}; + } + + OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override + { + OptimizationViews optimizationViews; + + optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph)); + + return optimizationViews; + } + +}; + +} // namespace armnn + +const char* GetBackendId() +{ + return armnn::SampleDynamicBackend::GetIdStatic().Get().c_str(); +} + +void GetVersion(uint32_t* outMajor, uint32_t* outMinor) +{ + if (!outMajor || !outMinor) + { + return; + } + + armnn::BackendVersion apiVersion = armnn::IBackendInternal::GetApiVersion(); + + *outMajor = apiVersion.m_Major; + *outMinor = apiVersion.m_Minor; +} + +void* BackendFactory() +{ + return new armnn::SampleDynamicBackend(); +} + + diff --git a/src/dynamic/sample/SampleDynamicBackend.hpp b/src/dynamic/sample/SampleDynamicBackend.hpp new file mode 100644 index 0000000000..8be1038e03 --- /dev/null +++ b/src/dynamic/sample/SampleDynamicBackend.hpp @@ -0,0 +1,15 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +extern "C" +{ +const char* GetBackendId(); +void GetVersion(uint32_t* outMajor, uint32_t* outMinor); +void* BackendFactory(); +} diff --git a/src/dynamic/sample/SampleDynamicLayerSupport.cpp b/src/dynamic/sample/SampleDynamicLayerSupport.cpp new file mode 100644 index 0000000000..031d39cbae --- /dev/null +++ b/src/dynamic/sample/SampleDynamicLayerSupport.cpp @@ -0,0 +1,51 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "SampleDynamicLayerSupport.hpp" + +#include +#include +#include + +namespace armnn +{ + +bool SampleDynamicLayerSupport::IsInputSupported(const TensorInfo& input, + Optional reasonIfUnsupported) const +{ + return true; +} + +bool SampleDynamicLayerSupport::IsOutputSupported(const TensorInfo& output, + Optional reasonIfUnsupported) const +{ + return true; +} + +bool SampleDynamicLayerSupport::IsAdditionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + Optional reasonIfUnsupported) const +{ + + if (input0.GetDataType() != armnn::DataType::Float32) + { + return false; + } + + if (input0.GetDataType() != input1.GetDataType()) + { + return false; + } + + if (input0.GetDataType() != output.GetDataType()) + { + return false; + } + + return true; +} + +} // namespace armnn diff --git a/src/dynamic/sample/SampleDynamicLayerSupport.hpp b/src/dynamic/sample/SampleDynamicLayerSupport.hpp new file mode 100644 index 0000000000..f6aa0cb91f --- /dev/null +++ b/src/dynamic/sample/SampleDynamicLayerSupport.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +namespace armnn +{ + +class SampleDynamicLayerSupport : public LayerSupportBase +{ +public: + bool IsAdditionSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + Optional reasonIfUnsupported = EmptyOptional()) const override; + + bool IsInputSupported(const TensorInfo& input, + Optional reasonIfUnsupported) const override; + + bool IsOutputSupported(const TensorInfo& output, + Optional reasonIfUnsupported) const override; +}; + +} // namespace armnn diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp new file mode 100644 index 0000000000..0fb5504f41 --- /dev/null +++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp @@ -0,0 +1,75 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include +#include + +#include "SampleDynamicAdditionWorkload.hpp" +#include "SampleDynamicBackend.hpp" +#include "SampleDynamicWorkloadFactory.hpp" +#include "SampleTensorHandle.hpp" + +namespace armnn +{ + +namespace +{ +static const BackendId s_Id{ GetBackendId() }; +} + +SampleDynamicWorkloadFactory::SampleDynamicWorkloadFactory(const std::shared_ptr& memoryManager) + : m_MemoryManager(memoryManager) +{ +} + +SampleDynamicWorkloadFactory::SampleDynamicWorkloadFactory() + : m_MemoryManager(new SampleMemoryManager()) +{ +} + +const BackendId& SampleDynamicWorkloadFactory::GetBackendId() const +{ + return s_Id; +} + +bool SampleDynamicWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer, + Optional dataType, + std::string& outReasonIfUnsupported) +{ + return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported); +} + +std::unique_ptr SampleDynamicWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo, + const bool isMemoryManaged) const +{ + return std::make_unique(tensorInfo); +} + +std::unique_ptr SampleDynamicWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo, + DataLayout dataLayout, + const bool isMemoryManaged) const +{ + return std::make_unique(tensorInfo); +} + +std::unique_ptr SampleDynamicWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::make_unique(descriptor, info); +} + +std::unique_ptr SampleDynamicWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::make_unique(descriptor, info); +} + +std::unique_ptr SampleDynamicWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::make_unique(descriptor, info); +} + +} // namespace armnn diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp new file mode 100644 index 0000000000..88b67987e1 --- /dev/null +++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp @@ -0,0 +1,62 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "SampleMemoryManager.hpp" + +#include +#include + +namespace armnn +{ + +// Sample Dynamic workload factory. 
+class SampleDynamicWorkloadFactory : public IWorkloadFactory +{ +public: + explicit SampleDynamicWorkloadFactory(const std::shared_ptr& memoryManager); + SampleDynamicWorkloadFactory(); + + ~SampleDynamicWorkloadFactory() {} + + const BackendId& GetBackendId() const override; + + static bool IsLayerSupported(const IConnectableLayer& layer, + Optional dataType, + std::string& outReasonIfUnsupported); + + bool SupportsSubTensors() const override { return false; } + + std::unique_ptr CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const override + { + boost::ignore_unused(parent, subTensorShape, subTensorOrigin); + return nullptr; + } + + std::unique_ptr CreateTensorHandle(const TensorInfo& tensorInfo, + const bool IsMemoryManaged = true) const override; + + std::unique_ptr CreateTensorHandle(const TensorInfo& tensorInfo, + DataLayout dataLayout, + const bool IsMemoryManaged = true) const override; + + std::unique_ptr CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + + std::unique_ptr CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + std::unique_ptr CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + +private: + mutable std::shared_ptr m_MemoryManager; + +}; + +} // namespace armnn diff --git a/src/dynamic/sample/SampleMemoryManager.cpp b/src/dynamic/sample/SampleMemoryManager.cpp new file mode 100644 index 0000000000..30a7548b02 --- /dev/null +++ b/src/dynamic/sample/SampleMemoryManager.cpp @@ -0,0 +1,95 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "SampleMemoryManager.hpp" + +#include + +namespace armnn +{ + +SampleMemoryManager::SampleMemoryManager() +{} + +SampleMemoryManager::~SampleMemoryManager() +{} + +SampleMemoryManager::Pool* SampleMemoryManager::Manage(unsigned int numBytes) +{ + if (!m_FreePools.empty()) + { + Pool* res = m_FreePools.back(); + m_FreePools.pop_back(); + res->Reserve(numBytes); + return res; + } + else + { + m_Pools.push_front(Pool(numBytes)); + return &m_Pools.front(); + } +} + +void SampleMemoryManager::Allocate(SampleMemoryManager::Pool* pool) +{ + m_FreePools.push_back(pool); +} + +void* SampleMemoryManager::GetPointer(SampleMemoryManager::Pool* pool) +{ + return pool->GetPointer(); +} + +void SampleMemoryManager::Acquire() +{ + for (Pool &pool: m_Pools) + { + pool.Acquire(); + } +} + +void SampleMemoryManager::Release() +{ + for (Pool &pool: m_Pools) + { + pool.Release(); + } +} + +SampleMemoryManager::Pool::Pool(unsigned int numBytes) + : m_Size(numBytes), + m_Pointer(nullptr) +{} + +SampleMemoryManager::Pool::~Pool() +{ + if (m_Pointer) + { + Release(); + } +} + +void* SampleMemoryManager::Pool::GetPointer() +{ + return m_Pointer; +} + +void SampleMemoryManager::Pool::Reserve(unsigned int numBytes) +{ + m_Size = std::max(m_Size, numBytes); +} + +void SampleMemoryManager::Pool::Acquire() +{ + m_Pointer = ::operator new(size_t(m_Size)); +} + +void SampleMemoryManager::Pool::Release() +{ + ::operator delete(m_Pointer); + m_Pointer = nullptr; +} + +} diff --git a/src/dynamic/sample/SampleMemoryManager.hpp b/src/dynamic/sample/SampleMemoryManager.hpp new file mode 100644 index 0000000000..0993bc1e2b --- /dev/null +++ b/src/dynamic/sample/SampleMemoryManager.hpp @@ -0,0 +1,59 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include +#include + +namespace armnn +{ + +// An implementation of IMemoryManager to be used with SampleTensorHandle +class SampleMemoryManager : public IMemoryManager +{ +public: + SampleMemoryManager(); + virtual ~SampleMemoryManager(); + + class Pool; + + Pool* Manage(unsigned int numBytes); + + void Allocate(Pool *pool); + + void* GetPointer(Pool *pool); + + void Acquire() override; + void Release() override; + + class Pool + { + public: + Pool(unsigned int numBytes); + ~Pool(); + + void Acquire(); + void Release(); + + void* GetPointer(); + + void Reserve(unsigned int numBytes); + + private: + unsigned int m_Size; + void* m_Pointer; + }; + +private: + SampleMemoryManager(const SampleMemoryManager&) = delete; // Noncopyable + SampleMemoryManager& operator=(const SampleMemoryManager&) = delete; // Noncopyable + + std::forward_list m_Pools; + std::vector m_FreePools; +}; + +} diff --git a/src/dynamic/sample/SampleTensorHandle.cpp b/src/dynamic/sample/SampleTensorHandle.cpp new file mode 100644 index 0000000000..48f8cf44fa --- /dev/null +++ b/src/dynamic/sample/SampleTensorHandle.cpp @@ -0,0 +1,137 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "SampleTensorHandle.hpp" + +namespace armnn +{ + +SampleTensorHandle::SampleTensorHandle(const TensorInfo &tensorInfo, + std::shared_ptr &memoryManager) + : m_TensorInfo(tensorInfo), + m_MemoryManager(memoryManager), + m_Pool(nullptr), + m_UnmanagedMemory(nullptr), + m_ImportFlags(static_cast(MemorySource::Undefined)), + m_Imported(false) +{ + +} + +SampleTensorHandle::SampleTensorHandle(const TensorInfo& tensorInfo, + std::shared_ptr &memoryManager, + MemorySourceFlags importFlags) + : m_TensorInfo(tensorInfo), + m_MemoryManager(memoryManager), + m_Pool(nullptr), + m_UnmanagedMemory(nullptr), + m_ImportFlags(importFlags), + m_Imported(false) +{ + +} + +SampleTensorHandle::~SampleTensorHandle() +{ + if (!m_Pool) + { + // unmanaged + if (!m_Imported) + { + ::operator delete(m_UnmanagedMemory); + } + } +} + +void SampleTensorHandle::Manage() +{ + m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes()); +} + +void SampleTensorHandle::Allocate() +{ + if (!m_UnmanagedMemory) + { + if (!m_Pool) + { + // unmanaged + m_UnmanagedMemory = ::operator new(m_TensorInfo.GetNumBytes()); + } + else + { + m_MemoryManager->Allocate(m_Pool); + } + } + else + { + throw InvalidArgumentException("SampleTensorHandle::Allocate Trying to allocate a SampleTensorHandle" + "that already has allocated memory."); + } +} + +const void* SampleTensorHandle::Map(bool /*unused*/) const +{ + return GetPointer(); +} + +void* SampleTensorHandle::GetPointer() const +{ + if (m_UnmanagedMemory) + { + return m_UnmanagedMemory; + } + else + { + return m_MemoryManager->GetPointer(m_Pool); + } +} + +bool SampleTensorHandle::Import(void* memory, MemorySource source) +{ + + if (m_ImportFlags & static_cast(source)) + { + if (source == MemorySource::Malloc) + { + // Check memory alignment + constexpr uintptr_t alignment = sizeof(size_t); + if (reinterpret_cast(memory) % alignment) + { + if (m_Imported) + { + m_Imported = false; + m_UnmanagedMemory = nullptr; + } + + return false; + } + + // m_UnmanagedMemory not yet allocated. + if (!m_Imported && !m_UnmanagedMemory) + { + m_UnmanagedMemory = memory; + m_Imported = true; + return true; + } + + // m_UnmanagedMemory initially allocated with Allocate(). 
+ if (!m_Imported && m_UnmanagedMemory) + { + return false; + } + + // m_UnmanagedMemory previously imported. + if (m_Imported) + { + m_UnmanagedMemory = memory; + return true; + } + } + } + + return false; +} + +} diff --git a/src/dynamic/sample/SampleTensorHandle.hpp b/src/dynamic/sample/SampleTensorHandle.hpp new file mode 100644 index 0000000000..c08edc69b7 --- /dev/null +++ b/src/dynamic/sample/SampleTensorHandle.hpp @@ -0,0 +1,78 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include "SampleMemoryManager.hpp" + +namespace armnn +{ + +// An implementation of ITensorHandle with simple "bump the pointer" memory-management behaviour +class SampleTensorHandle : public ITensorHandle +{ +public: + SampleTensorHandle(const TensorInfo& tensorInfo, std::shared_ptr &memoryManager); + + SampleTensorHandle(const TensorInfo& tensorInfo, + std::shared_ptr &memoryManager, + MemorySourceFlags importFlags); + + ~SampleTensorHandle(); + + virtual void Manage() override; + + virtual void Allocate() override; + + virtual ITensorHandle* GetParent() const override + { + return nullptr; + } + + virtual const void* Map(bool /* blocking = true */) const override; + using ITensorHandle::Map; + + virtual void Unmap() const override + {} + + TensorShape GetStrides() const override + { + return GetUnpaddedTensorStrides(m_TensorInfo); + } + + TensorShape GetShape() const override + { + return m_TensorInfo.GetShape(); + } + + const TensorInfo& GetTensorInfo() const + { + return m_TensorInfo; + } + + virtual MemorySourceFlags GetImportFlags() const override + { + return m_ImportFlags; + } + + virtual bool Import(void* memory, MemorySource source) override; + +private: + void* GetPointer() const; + + SampleTensorHandle(const SampleTensorHandle& other) = delete; // noncopyable + SampleTensorHandle& operator=(const SampleTensorHandle& other) = delete; //noncopyable + + TensorInfo m_TensorInfo; + + std::shared_ptr m_MemoryManager; + SampleMemoryManager::Pool* m_Pool; + mutable void *m_UnmanagedMemory; + MemorySourceFlags m_ImportFlags; + bool m_Imported; +}; + +} -- cgit v1.2.1
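[Editor's usage note, not part of the patch] ArmNN discovers a dynamic backend by opening the shared object and resolving the extern "C" entry points declared in SampleDynamicBackend.hpp (GetBackendId, GetVersion, BackendFactory). The sketch below performs that handshake by hand purely for illustration: the library filename and path are assumptions about the build output, and in normal use the runtime's dynamic backend loader does this automatically for the paths configured via DYNAMIC_BACKEND_PATHS.

// Illustrative only: manually resolving the sample backend's entry points.
// ArmNN's own dynamic backend loader performs the equivalent steps at runtime.
#include <dlfcn.h>
#include <cstdint>
#include <iostream>

int main()
{
    // Assumed output name/location of the MODULE library built from src/dynamic/sample.
    void* handle = dlopen("./libArm_SampleDynamic_backend.so", RTLD_LAZY);
    if (!handle)
    {
        std::cerr << "dlopen failed: " << dlerror() << std::endl;
        return 1;
    }

    using GetBackendIdFn = const char* (*)();
    using GetVersionFn   = void (*)(uint32_t*, uint32_t*);

    auto getBackendId = reinterpret_cast<GetBackendIdFn>(dlsym(handle, "GetBackendId"));
    auto getVersion   = reinterpret_cast<GetVersionFn>(dlsym(handle, "GetVersion"));

    if (getBackendId && getVersion)
    {
        uint32_t versionMajor = 0;
        uint32_t versionMinor = 0;
        getVersion(&versionMajor, &versionMinor);
        std::cout << getBackendId() << " implements backend API "
                  << versionMajor << "." << versionMinor << std::endl;
    }

    // BackendFactory() would be resolved the same way; the ArmNN loader casts its
    // void* result to an IBackendInternal*, so it is omitted from this sketch.
    dlclose(handle);
    return 0;
}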