diff options
Diffstat (limited to 'src/armnnSerializer')
-rw-r--r-- | src/armnnSerializer/Schema.fbs | 15 |
-rw-r--r-- | src/armnnSerializer/Serializer.cpp | 21 |
-rw-r--r-- | src/armnnSerializer/Serializer.hpp | 5 |
-rw-r--r-- | src/armnnSerializer/SerializerSupport.md | 1 |
-rw-r--r-- | src/armnnSerializer/test/SerializerTests.cpp | 91 |
5 files changed, 130 insertions, 3 deletions
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs index 5d6388d944..411b89ad8e 100644 --- a/src/armnnSerializer/Schema.fbs +++ b/src/armnnSerializer/Schema.fbs @@ -66,7 +66,8 @@ enum LayerType : uint { Addition = 0, Input = 1, Multiplication = 2, - Output = 3 + Output = 3, + Softmax = 4 } // Base layer table to be used as part of other layers @@ -96,6 +97,15 @@ table MultiplicationLayer { base:LayerBase; } +table SoftmaxLayer { + base:LayerBase; + descriptor:SoftmaxDescriptor; +} + +table SoftmaxDescriptor { + beta:float; +} + table OutputLayer { base:BindableLayerBase; } @@ -104,7 +114,8 @@ union Layer { AdditionLayer, InputLayer, MultiplicationLayer, - OutputLayer + OutputLayer, + SoftmaxLayer } table AnyLayer { diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 79ad7e19af..ba4b36934c 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -119,6 +119,27 @@ void SerializerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer, CreateAnyLayer(flatBufferMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer); } +// Build FlatBuffer for Softmax Layer +void SerializerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer, + const SoftmaxDescriptor& softmaxDescriptor, + const char* name) +{ + // Create FlatBuffer BaseLayer + auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax); + + // Create the FlatBuffer SoftmaxDescriptor + auto flatBufferSoftmaxDesc = + serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder, softmaxDescriptor.m_Beta); + + // Create the FlatBuffer SoftmaxLayer + auto flatBufferSoftmaxLayer = + serializer::CreateSoftmaxLayer(m_flatBufferBuilder, + flatBufferSoftmaxBaseLayer, + flatBufferSoftmaxDesc); + + CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer); +} + fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const 
IConnectableLayer* layer, const serializer::LayerType layerType) { diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp index 8a509e880e..ec26dc1796 100644 --- a/src/armnnSerializer/Serializer.hpp +++ b/src/armnnSerializer/Serializer.hpp @@ -9,7 +9,6 @@ #include <armnnSerializer/ISerializer.hpp> -#include <iostream> #include <unordered_map> #include <Schema_generated.h> @@ -57,6 +56,10 @@ public: void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override; + void VisitSoftmaxLayer(const armnn::IConnectableLayer* layer, + const armnn::SoftmaxDescriptor& softmaxDescriptor, + const char* name = nullptr) override; + private: /// Creates the Input Slots and Output Slots and LayerBase for the layer. diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md index 5978c8a952..617eafb46d 100644 --- a/src/armnnSerializer/SerializerSupport.md +++ b/src/armnnSerializer/SerializerSupport.md @@ -8,5 +8,6 @@ The Arm NN SDK Serializer currently supports the following layers: * Addition * Multiplication +* Softmax More machine learning layers will be supported in future releases.
\ No newline at end of file diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index ab4bc0fe0b..5b55682dfa 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -5,12 +5,23 @@ #include <armnn/ArmNN.hpp> #include <armnn/INetwork.hpp> + #include "../Serializer.hpp" + +#include <armnnDeserializeParser/IDeserializeParser.hpp> + +#include <numeric> #include <sstream> +#include <vector> + #include <boost/test/unit_test.hpp> +#include <flatbuffers/idl.h> + BOOST_AUTO_TEST_SUITE(SerializerTests) +armnnDeserializeParser::IDeserializeParserPtr g_Parser = armnnDeserializeParser::IDeserializeParser::Create(); + BOOST_AUTO_TEST_CASE(SimpleNetworkSerialization) { armnn::INetworkPtr network = armnn::INetwork::Create(); @@ -58,4 +69,84 @@ BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization) BOOST_TEST(stream.str().find(multLayerName) != stream.str().npos); } +BOOST_AUTO_TEST_CASE(SimpleSoftmaxIntegration) +{ + armnn::TensorInfo tensorInfo({1, 10}, armnn::DataType::Float32); + + armnn::SoftmaxDescriptor descriptor; + descriptor.m_Beta = 1.0f; + + // Create test network + armnn::INetworkPtr network = armnn::INetwork::Create(); + armnn::IConnectableLayer *const inputLayer = network->AddInputLayer(0); + armnn::IConnectableLayer *const softmaxLayer = network->AddSoftmaxLayer(descriptor, "softmax"); + armnn::IConnectableLayer *const outputLayer = network->AddOutputLayer(0); + + inputLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0)); + inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); + softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + softmaxLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo); + + // Serialize + armnnSerializer::Serializer serializer; + serializer.Serialize(*network); + std::stringstream stream; + serializer.SaveSerializedToStream(stream); + const std::string serializerString{stream.str()}; + + // 
Deserialize + armnn::INetworkPtr deserializedNetwork = + g_Parser->CreateNetworkFromBinary({serializerString.begin(), serializerString.end()}); + BOOST_CHECK(deserializedNetwork); + + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr run = armnn::IRuntime::Create(options); + + armnn::IOptimizedNetworkPtr optimizedNetwork = + armnn::Optimize(*network, {armnn::Compute::CpuRef}, run->GetDeviceSpec()); + BOOST_CHECK(optimizedNetwork); + + armnn::IOptimizedNetworkPtr deserializedOptimizedNetwork = + armnn::Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, run->GetDeviceSpec()); + BOOST_CHECK(deserializedOptimizedNetwork); + + armnn::NetworkId networkId1; + armnn::NetworkId networkId2; + + run->LoadNetwork(networkId1, std::move(optimizedNetwork)); + run->LoadNetwork(networkId2, std::move(deserializedOptimizedNetwork)); + + std::vector<float> inputData(tensorInfo.GetNumElements()); + std::iota(inputData.begin(), inputData.end(), 0); + + armnn::InputTensors inputTensors1 + { + {0, armnn::ConstTensor(run->GetInputTensorInfo(networkId1, 0), inputData.data())} + }; + + armnn::InputTensors inputTensors2 + { + {0, armnn::ConstTensor(run->GetInputTensorInfo(networkId2, 0), inputData.data())} + }; + + std::vector<float> outputData1(inputData.size()); + std::vector<float> outputData2(inputData.size()); + + armnn::OutputTensors outputTensors1 + { + {0, armnn::Tensor(run->GetOutputTensorInfo(networkId1, 0), outputData1.data())} + }; + + armnn::OutputTensors outputTensors2 + { + {0, armnn::Tensor(run->GetOutputTensorInfo(networkId2, 0), outputData2.data())} + }; + + run->EnqueueWorkload(networkId1, inputTensors1, outputTensors1); + run->EnqueueWorkload(networkId2, inputTensors2, outputTensors2); + + BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(), + outputData2.begin(), outputData2.end()); +} + BOOST_AUTO_TEST_SUITE_END() |