From c04125f678be1cce86ebcfe94b587e8eba5b7fde Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Tue, 19 Feb 2019 16:31:08 +0000
Subject: IVGCVSW-2685 Serialize / de-serialize the DepthwiseConvolution2d layer

Change-Id: I37e360c824b30cb14cbef86f6ff7636bc9382109
Signed-off-by: Aron Virginas-Tar
---
 src/armnnSerializer/test/SerializerTests.cpp | 385 +++++++++++++--------------
 1 file changed, 190 insertions(+), 195 deletions(-)

(limited to 'src/armnnSerializer/test')

diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 31ef0455c3..a88193d842 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -10,7 +10,7 @@
 
 #include 
 
-#include 
+#include 
 
 #include 
 #include 
@@ -40,113 +40,120 @@ std::string SerializeNetwork(const armnn::INetwork& network)
     return serializerString;
 }
 
-} // anonymous namespace
-
-BOOST_AUTO_TEST_SUITE(SerializerTests)
-
-BOOST_AUTO_TEST_CASE(SimpleNetworkSerialization)
+template<typename DataType>
+static std::vector<DataType> GenerateRandomData(size_t size)
 {
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
+    constexpr bool isIntegerType = std::is_integral<DataType>::value;
+    using Distribution =
+        typename std::conditional<isIntegerType, std::uniform_int_distribution<DataType>,
+                                  std::uniform_real_distribution<DataType>>::type;
 
-    armnn::IConnectableLayer* const additionLayer0 = network->AddAdditionLayer();
-    inputLayer0->GetOutputSlot(0).Connect(additionLayer0->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).Connect(additionLayer0->GetInputSlot(1));
+    static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min();
+    static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
 
-    armnn::IConnectableLayer* const outputLayer0 = network->AddOutputLayer(0);
-    additionLayer0->GetOutputSlot(0).Connect(outputLayer0->GetInputSlot(0));
+    static Distribution distribution(lowerLimit, upperLimit);
+    static std::default_random_engine generator;
 
-    armnnSerializer::Serializer serializer;
-    serializer.Serialize(*network);
+    std::vector<DataType> randomData(size);
+    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
 
-    std::stringstream stream;
-    serializer.SaveSerializedToStream(stream);
-    BOOST_TEST(stream.str().length() > 0);
+    return randomData;
 }
 
-BOOST_AUTO_TEST_CASE(Conv2dSerialization)
+void CheckDeserializedNetworkAgainstOriginal(const armnn::INetwork& deserializedNetwork,
+                                             const armnn::INetwork& originalNetwork,
+                                             const armnn::TensorShape& inputShape,
+                                             const armnn::TensorShape& outputShape,
+                                             armnn::LayerBindingId inputBindingId = 0,
+                                             armnn::LayerBindingId outputBindingId = 0)
 {
-    armnn::IRuntime::CreationOptions options; // default options
-    armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
 
-    armnnDeserializeParser::IDeserializeParserPtr parser = armnnDeserializeParser::IDeserializeParser::Create();
+    std::vector<armnn::BackendId> preferredBackends = { armnn::BackendId("CpuRef") };
 
-    armnn::TensorInfo inputInfo(armnn::TensorShape({1, 5, 5, 1}), armnn::DataType::Float32, 1.0f, 0);
-    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32, 4.0f, 0);
+    // Optimize original network
+    armnn::IOptimizedNetworkPtr optimizedOriginalNetwork =
+        armnn::Optimize(originalNetwork, preferredBackends, runtime->GetDeviceSpec());
+    BOOST_CHECK(optimizedOriginalNetwork);
 
-    armnn::TensorInfo weightsInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32, 2.0f, 0);
+    // Optimize deserialized network
+    armnn::IOptimizedNetworkPtr optimizedDeserializedNetwork =
+        armnn::Optimize(deserializedNetwork, preferredBackends, runtime->GetDeviceSpec());
+    BOOST_CHECK(optimizedDeserializedNetwork);
 
-    std::vector<float> weightsData({4, 5, 6, 0, 0, 0, 3, 2, 1});
+    armnn::NetworkId networkId1;
+    armnn::NetworkId networkId2;
 
-    // Construct network
-    armnn::INetworkPtr network = armnn::INetwork::Create();
+    // Load original and deserialized network
+    armnn::Status status1 = runtime->LoadNetwork(networkId1, std::move(optimizedOriginalNetwork));
+    BOOST_CHECK(status1 == armnn::Status::Success);
 
-    armnn::Convolution2dDescriptor descriptor;
-    descriptor.m_PadLeft = 1;
-    descriptor.m_PadRight = 1;
-    descriptor.m_PadTop = 1;
-    descriptor.m_PadBottom = 1;
-    descriptor.m_StrideX = 2;
-    descriptor.m_StrideY = 2;
-    descriptor.m_BiasEnabled = false;
-    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+    armnn::Status status2 = runtime->LoadNetwork(networkId2, std::move(optimizedDeserializedNetwork));
+    BOOST_CHECK(status2 == armnn::Status::Success);
 
-    armnn::ConstTensor weights(weightsInfo, weightsData);
+    // Generate some input data
+    std::vector<float> inputData = GenerateRandomData<float>(inputShape.GetNumElements());
 
-    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(descriptor, weights, "conv");
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0, "output");
+    armnn::InputTensors inputTensors1
+    {
+        { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId1, inputBindingId), inputData.data()) }
+    };
 
-    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    armnn::InputTensors inputTensors2
+    {
+        { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId2, inputBindingId), inputData.data()) }
+    };
 
-    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    std::vector<float> outputData1(outputShape.GetNumElements());
+    std::vector<float> outputData2(outputShape.GetNumElements());
 
-    armnnSerializer::Serializer serializer;
-    serializer.Serialize(*network);
+    armnn::OutputTensors outputTensors1
+    {
+        { 0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId1, outputBindingId), outputData1.data()) }
+    };
 
-    std::stringstream stream;
-    serializer.SaveSerializedToStream(stream);
+    armnn::OutputTensors outputTensors2
+    {
+        { 0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId2, outputBindingId), outputData2.data()) }
+    };
 
-    std::string const serializerString{stream.str()};
-    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+    // Run original and deserialized network
+    runtime->EnqueueWorkload(networkId1, inputTensors1, outputTensors1);
+    runtime->EnqueueWorkload(networkId2, inputTensors2, outputTensors2);
 
-    armnn::INetworkPtr deserializedNetwork = parser->CreateNetworkFromBinary(serializerVector);
+    // Compare output data
+    BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
+                                  outputData2.begin(), outputData2.end());
+}
 
-    auto deserializedOptimized = Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+} // anonymous namespace
 
-    armnn::NetworkId networkIdentifier;
+BOOST_AUTO_TEST_SUITE(SerializerTests)
 
-    // Load graph into runtime
-    run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+BOOST_AUTO_TEST_CASE(SerializeAddition)
+{
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
 
-    std::vector<float> inputData
-    {
-        1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9, 2
-    };
-    armnn::InputTensors inputTensors
-    {
-        {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
-    };
+    armnn::IConnectableLayer* const additionLayer0 = network->AddAdditionLayer();
+    inputLayer0->GetOutputSlot(0).Connect(additionLayer0->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).Connect(additionLayer0->GetInputSlot(1));
 
-    std::vector<float> expectedOutputData
-    {
-        23, 33, 24, 91, 99, 48, 26, 50, 19
-    };
+    armnn::IConnectableLayer* const outputLayer0 = network->AddOutputLayer(0);
+    additionLayer0->GetOutputSlot(0).Connect(outputLayer0->GetInputSlot(0));
 
-    std::vector<float> outputData(9);
-    armnn::OutputTensors outputTensors
-    {
-        {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
-    };
-    run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(),
-                                  expectedOutputData.begin(), expectedOutputData.end());
+    armnnSerializer::Serializer serializer;
+    serializer.Serialize(*network);
+
+    std::stringstream stream;
+    serializer.SaveSerializedToStream(stream);
+    BOOST_TEST(stream.str().length() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization)
+BOOST_AUTO_TEST_CASE(SerializeMultiplication)
 {
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
 
@@ -172,14 +179,57 @@ BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization)
     BOOST_TEST(stream.str().find(multLayerName) != stream.str().npos);
 }
 
-BOOST_AUTO_TEST_CASE(SimpleReshapeIntegration)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeConvolution2d)
 {
-    armnn::NetworkId networkIdentifier;
-    armnn::IRuntime::CreationOptions options; // default options
-    armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+    armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+
+    armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+
+    // Construct network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+
+    armnn::Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
+    armnn::ConstTensor weights(weightsInfo, weightsData);
+
+    std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
+    armnn::ConstTensor biases(biasesInfo, biasesData);
 
-    unsigned int inputShape[] = {1, 9};
-    unsigned int outputShape[] = {3, 3};
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
+    armnn::IConnectableLayer* const convLayer =
+        network->AddConvolution2dLayer(descriptor, weights, biases, "convolution");
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0, "output");
+
+    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            inputInfo.GetShape(),
+                                            outputInfo.GetShape());
+}
+
+BOOST_AUTO_TEST_CASE(SerializeDeserializeReshape)
+{
+    unsigned int inputShape[] = { 1, 9 };
+    unsigned int outputShape[] = { 3, 3 };
 
     auto inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::DataType::Float32);
     auto outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
@@ -198,49 +248,62 @@ BOOST_AUTO_TEST_CASE(SimpleReshapeIntegration)
     reshapeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
     reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
-    armnnSerializer::Serializer serializer;
-    serializer.Serialize(*network);
-    std::stringstream stream;
-    serializer.SaveSerializedToStream(stream);
-    std::string const serializerString{stream.str()};
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
 
-    //Deserialize network.
-    auto deserializedNetwork = DeserializeNetwork(serializerString);
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            inputTensorInfo.GetShape(),
+                                            outputTensorInfo.GetShape());
+}
 
-    //Optimize the deserialized network
-    auto deserializedOptimized = Optimize(*deserializedNetwork, {armnn::Compute::CpuRef},
-                                          run->GetDeviceSpec());
+BOOST_AUTO_TEST_CASE(SerializeDeserializeDepthwiseConvolution2d)
+{
+    armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo outputInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
 
-    // Load graph into runtime
-    run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+    armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
 
-    std::vector<float> input1Data(inputTensorInfo.GetNumElements());
-    std::iota(input1Data.begin(), input1Data.end(), 8);
+    armnn::DepthwiseConvolution2dDescriptor descriptor;
+    descriptor.m_StrideX = 1;
+    descriptor.m_StrideY = 1;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
 
-    armnn::InputTensors inputTensors
-    {
-        {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
-    };
+    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
+    armnn::ConstTensor weights(weightsInfo, weightsData);
 
-    std::vector<float> outputData(input1Data.size());
-    armnn::OutputTensors outputTensors
-    {
-        {0,armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
-    };
+    std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
+    armnn::ConstTensor biases(biasesInfo, biasesData);
 
-    run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const depthwiseConvLayer =
+        network->AddDepthwiseConvolution2dLayer(descriptor, weights, biases, "depthwiseConv");
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(),outputData.end(), input1Data.begin(),input1Data.end());
+    inputLayer->GetOutputSlot(0).Connect(depthwiseConvLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
 
+    depthwiseConvLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    depthwiseConvLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            inputInfo.GetShape(),
+                                            outputInfo.GetShape());
 }
 
-BOOST_AUTO_TEST_CASE(SimpleSoftmaxIntegration)
+BOOST_AUTO_TEST_CASE(SerializeDeserializeSoftmax)
 {
     armnn::TensorInfo tensorInfo({1, 10}, armnn::DataType::Float32);
 
     armnn::SoftmaxDescriptor descriptor;
     descriptor.m_Beta = 1.0f;
 
-    // Create test network
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
     armnn::IConnectableLayer* const softmaxLayer = network->AddSoftmaxLayer(descriptor, "softmax");
@@ -251,71 +314,22 @@ BOOST_AUTO_TEST_CASE(SimpleSoftmaxIntegration)
     softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
     softmaxLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // Serialize & deserialize network
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
-
-    armnn::IOptimizedNetworkPtr optimizedNetwork =
-        armnn::Optimize(*network, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
-    BOOST_CHECK(optimizedNetwork);
-
-    armnn::IOptimizedNetworkPtr deserializedOptimizedNetwork =
-        armnn::Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
-    BOOST_CHECK(deserializedOptimizedNetwork);
-
-    armnn::NetworkId networkId1;
-    armnn::NetworkId networkId2;
-
-    runtime->LoadNetwork(networkId1, std::move(optimizedNetwork));
-    runtime->LoadNetwork(networkId2, std::move(deserializedOptimizedNetwork));
-
-    std::vector<float> inputData(tensorInfo.GetNumElements());
-    std::iota(inputData.begin(), inputData.end(), 0);
-
-    armnn::InputTensors inputTensors1
-    {
-        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId1, 0), inputData.data())}
-    };
-
-    armnn::InputTensors inputTensors2
-    {
-        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId2, 0), inputData.data())}
-    };
-
-    std::vector<float> outputData1(inputData.size());
-    std::vector<float> outputData2(inputData.size());
-
-    armnn::OutputTensors outputTensors1
-    {
-        {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId1, 0), outputData1.data())}
-    };
-
-    armnn::OutputTensors outputTensors2
-    {
-        {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId2, 0), outputData2.data())}
-    };
-
-    runtime->EnqueueWorkload(networkId1, inputTensors1, outputTensors1);
-    runtime->EnqueueWorkload(networkId2, inputTensors2, outputTensors2);
-
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
-                                  outputData2.begin(), outputData2.end());
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            tensorInfo.GetShape(),
+                                            tensorInfo.GetShape());
 }
 
-BOOST_AUTO_TEST_CASE(SimplePooling2dIntegration)
+BOOST_AUTO_TEST_CASE(SerializeDeserializePooling2d)
 {
-    armnn::NetworkId networkIdentifier;
-    armnn::IRuntime::CreationOptions options; // default options
-    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
-
     unsigned int inputShape[] = {1, 2, 2, 1};
     unsigned int outputShape[] = {1, 1, 1, 1};
 
-    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
-    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+    auto inputInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    auto outputInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
 
     armnn::Pooling2dDescriptor desc;
     desc.m_DataLayout = armnn::DataLayout::NHWC;
@@ -337,36 +351,17 @@ BOOST_AUTO_TEST_CASE(SimplePooling2dIntegration)
     armnn::IConnectableLayer *const outputLayer = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(pooling2dLayer->GetInputSlot(0));
-    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
 
     pooling2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    pooling2dLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
-    auto deserializeNetwork = DeserializeNetwork(SerializeNetwork(*network));
-
-    //Optimize the deserialized network
-    auto deserializedOptimized = Optimize(*deserializeNetwork, {armnn::Compute::CpuRef},
-                                          runtime->GetDeviceSpec());
+    pooling2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
-    // Load graph into runtime
-    runtime->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
-
-    std::vector<float> input1Data(inputTensorInfo.GetNumElements());
-    std::iota(input1Data.begin(), input1Data.end(), 4);
-
-    armnn::InputTensors inputTensors
-    {
-        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
-    };
-
-    std::vector<float> outputData(input1Data.size());
-    armnn::OutputTensors outputTensors
-    {
-        {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
-    };
-
-    runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
 
-    BOOST_CHECK_EQUAL(outputData[0], 5.5);
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            inputInfo.GetShape(),
+                                            outputInfo.GetShape());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
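
Note on the round-trip helpers: the SerializeDeserialize* cases above pass each network through SerializeNetwork (shown at the top of the second hunk) and a DeserializeNetwork helper whose body lies outside this hunk. A minimal sketch of what such a helper could look like, assuming the armnnDeserializeParser API that the removed Conv2dSerialization test was calling (IDeserializeParser::Create and CreateNetworkFromBinary) and an assumed std::uint8_t buffer type, is given below; it is illustrative only, not the patch's actual implementation.

    // Sketch only: the helper's real body is not part of this patch; the buffer
    // element type and exact parser headers are assumptions.
    armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
    {
        // Copy the serialized stream into the binary container the parser expects
        std::vector<std::uint8_t> binaryContent(serializerString.begin(), serializerString.end());

        armnnDeserializeParser::IDeserializeParserPtr parser =
            armnnDeserializeParser::IDeserializeParser::Create();
        return parser->CreateNetworkFromBinary(binaryContent);
    }

Each test then hands the round-tripped network, together with the original, to CheckDeserializedNetworkAgainstOriginal, which optimizes and runs both on the CpuRef backend with the same random input and compares the outputs element by element.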