275 TEST_CASE(
"RegisterFactories")
277 TestBackendA backendA;
278 TestBackendB backendB;
280 CHECK(backendA.GetHandleFactoryPreferences()[0] ==
"TestHandleFactoryA1");
281 CHECK(backendA.GetHandleFactoryPreferences()[1] ==
"TestHandleFactoryA2");
282 CHECK(backendA.GetHandleFactoryPreferences()[2] ==
"TestHandleFactoryB1");
283 CHECK(backendA.GetHandleFactoryPreferences()[3] ==
"TestHandleFactoryD1");
286 backendA.RegisterTensorHandleFactories(registry);
287 backendB.RegisterTensorHandleFactories(registry);
289 CHECK((registry.GetFactory(
"Non-existing Backend") ==
nullptr));
290 CHECK((registry.GetFactory(
"TestHandleFactoryA1") !=
nullptr));
291 CHECK((registry.GetFactory(
"TestHandleFactoryA2") !=
nullptr));
292 CHECK((registry.GetFactory(
"TestHandleFactoryB1") !=
nullptr));
295 TEST_CASE(
"TensorHandleSelectionStrategy")
297 auto backendA = std::make_unique<TestBackendA>();
298 auto backendB = std::make_unique<TestBackendB>();
299 auto backendC = std::make_unique<TestBackendC>();
300 auto backendD = std::make_unique<TestBackendD>();
303 backendA->RegisterTensorHandleFactories(registry);
304 backendB->RegisterTensorHandleFactories(registry);
305 backendC->RegisterTensorHandleFactories(registry);
306 backendD->RegisterTensorHandleFactories(registry);
309 backends[
"BackendA"] = std::move(backendA);
310 backends[
"BackendB"] = std::move(backendB);
311 backends[
"BackendC"] = std::move(backendC);
312 backends[
"BackendD"] = std::move(backendD);
343 std::vector<std::string> errors;
346 CHECK(result.m_Error ==
false);
347 CHECK(result.m_Warning ==
false);
375 if (layer->
GetType() == LayerType::MemCopy)
380 CHECK(copyCount == 1);
386 if (layer->
GetType() == LayerType::MemImport)
391 CHECK(importCount == 1);
394 TEST_CASE(
"RegisterCopyAndImportFactoryPairTest")
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
int Connect(InputSlot &destination)
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
void SetBackendId(const BackendId &id)
void RegisterCopyAndImportFactoryPair(ITensorHandleFactory::FactoryId copyFactoryId, ITensorHandleFactory::FactoryId importFactoryId)
Register a pair of TensorHandleFactory Id for Memory Copy and TensorHandleFactory Id for Memory Import.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
A layer user-provided data can be bound to (e.g. inputs, outputs).
void ForEachLayer(Func func) const
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry ®istry, bool importEnabled, bool exportEnabled, Optional< std::vector< std::string > &> errMessages)
This layer represents a softmax operation.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
ITensorHandleFactory::FactoryId GetMatchingImportFactoryId(ITensorHandleFactory::FactoryId copyFactoryId)
Get a matching TensorHandleFactory Id for Memory Import given TensorHandleFactory Id for Memory Copy...
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Graph & TopologicalSort()
Sorts layers in topological order and return this.
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry ®istry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap