ArmNN 22.05
ClContextSerializerTests.cpp
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClContextControlFixture.hpp"

#include <armnnUtils/Filesystem.hpp>

#include <doctest/doctest.h>

#include <fstream>

namespace
{

armnn::INetworkPtr CreateNetwork()
{
    // Builds up the structure of the network.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input   = net->AddInputLayer(0, "input");
    armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(armnn::SoftmaxDescriptor(), "softmax");
    armnn::IConnectableLayer* output  = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the input and output tensor infos.
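    // QAsymmU8 tensor infos take a quantization scale and zero point:
    // real = scale * (quantized - zeroPoint). The very large input scale,
    // together with the input values chosen in RunInference below, drives
    // the softmax into saturation.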
    armnn::TensorInfo inputTensorInfo(armnn::TensorShape({1, 5}), armnn::DataType::QAsymmU8, 10000.0f, 1);
    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    armnn::TensorInfo outputTensorInfo(armnn::TensorShape({1, 5}), armnn::DataType::QAsymmU8, 1.0f/255.0f, 0);
    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    return net;
}

void RunInference(armnn::NetworkId& netId, armnn::IRuntimePtr& runtime, std::vector<uint8_t>& outputData)
{
    // Creates structures for input & output.
    std::vector<uint8_t> inputData
    {
        1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
    };

    armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
    inputTensorInfo.SetConstant(true);
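    // ConstTensor construction requires a TensorInfo that has been flagged
    // as constant, hence the SetConstant(true) call above.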
    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
    };

    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Run inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
}

std::vector<char> ReadBinaryFile(const std::string& binaryFileName)
{
    std::ifstream input(binaryFileName, std::ios::binary);
    return std::vector<char>(std::istreambuf_iterator<char>(input), {});
}

} // anonymous namespace

TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextSerializerTest")
{
    // Get tmp directory and create blank file.
    fs::path filePath = armnnUtils::Filesystem::NamedTempFile("Armnn-CachedNetworkFileTest-TempFile.bin");
    std::string const filePathString{filePath.string()};
    std::ofstream file { filePathString };
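    // NamedTempFile deletes any pre-existing file of that name, and the
    // ofstream recreates it empty, so the fs::is_empty() check below starts
    // from a known state.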

    // Create runtime in which test will run.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};

    // Create two networks.
    // net1 will serialize and save its CL context to file.
    // net2 will deserialize and load the context saved by net1.
    armnn::INetworkPtr net1 = CreateNetwork();
    armnn::INetworkPtr net2 = CreateNetwork();

    // Add specific optimizerOptions to each network.
    armnn::OptimizerOptions optimizerOptions1;
    armnn::OptimizerOptions optimizerOptions2;
    armnn::BackendOptions modelOptions1("GpuAcc",
                                        {{"SaveCachedNetwork", true}, {"CachedNetworkFilePath", filePathString}});
    armnn::BackendOptions modelOptions2("GpuAcc",
                                        {{"SaveCachedNetwork", false}, {"CachedNetworkFilePath", filePathString}});
    optimizerOptions1.m_ModelOptions.push_back(modelOptions1);
    optimizerOptions2.m_ModelOptions.push_back(modelOptions2);

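    // Optimize() passes the backend-specific model options through to the
    // GpuAcc backend; SaveCachedNetwork and CachedNetworkFilePath control
    // whether the compiled OpenCL program is written to, or read from, the
    // cache file when the optimized network is later loaded.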
    armnn::IOptimizedNetworkPtr optNet1 = armnn::Optimize(
            *net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
    armnn::IOptimizedNetworkPtr optNet2 = armnn::Optimize(
            *net2, backends, runtime->GetDeviceSpec(), optimizerOptions2);
    CHECK(optNet1);
    CHECK(optNet2);

    // Cached file should be empty until net1 is loaded into runtime.
    CHECK(fs::is_empty(filePathString));

    // Load net1 into the runtime.
    armnn::NetworkId netId1;
    CHECK(runtime->LoadNetwork(netId1, std::move(optNet1)) == armnn::Status::Success);
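    // LoadNetwork is where the CL backend compiles the network's OpenCL
    // kernels; with SaveCachedNetwork=true the compiled context is
    // serialized to the cache file as part of this step.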

    // File should now exist and not be empty. It has been serialized.
    CHECK(fs::exists(filePathString));
    std::vector<char> dataSerialized = ReadBinaryFile(filePathString);
    CHECK(dataSerialized.size() != 0);

    // Load net2 into the runtime, deserializing the cached context from file.
    armnn::NetworkId netId2;
    CHECK(runtime->LoadNetwork(netId2, std::move(optNet2)) == armnn::Status::Success);

    // Run inference on both networks and collect the output data.
    std::vector<uint8_t> outputData1(5);
    RunInference(netId1, runtime, outputData1);

    std::vector<uint8_t> outputData2(5);
    RunInference(netId2, runtime, outputData2);

    // Compare outputs from both networks.
    CHECK(std::equal(outputData1.begin(), outputData1.end(), outputData2.begin(), outputData2.end()));

    // Remove the temp file created.
    fs::remove(filePath);
}
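
The test above exercises the GpuAcc cached-network options end to end. For reference, here is a minimal sketch of how an application might enable the same mechanism. The options shown are exactly those used in the test; the cache path "/tmp/my_net.bin" is a hypothetical placeholder, and CreateNetwork stands in for building your own network.

    // Sketch: compile once with SaveCachedNetwork=true; later runs can set
    // it to false and reuse the cache found at CachedNetworkFilePath.
    armnn::IRuntime::CreationOptions runtimeOptions;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(runtimeOptions));

    armnn::INetworkPtr network = CreateNetwork(); // placeholder: build your own network here

    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ModelOptions.push_back(
        armnn::BackendOptions("GpuAcc",
                              {{"SaveCachedNetwork", true},
                               {"CachedNetworkFilePath", "/tmp/my_net.bin"}})); // hypothetical path

    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(
        *network, backends, runtime->GetDeviceSpec(), optimizerOptions);

    armnn::NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet)); // cache file is written here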