author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-06-26 15:02:47 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>    2019-06-27 14:34:50 +0000
commit    98180eff3c1feed19576d82ceac06d476235d973
tree      3fb83f43e02a10de12c7b2d2af2984bf4785b25d
parent    735a450d3b53a2d745b9a7a6d85747e25ec37ede
IVGCVSW-3324 Add end-to-end tests for TransposeConvolution2d on CpuRef

* Added one end-to-end test for all supported data types and data layouts
* Implemented RefLayerSupport::IsTransposeConvolution2dSupported()
* Fixed the formula used in TransposeConvolution2dLayer::InferOutputShapes()

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: If1ba3c226ecfa17f7fceffae857f39297c6433f2
-rw-r--r--  src/armnn/InternalTypes.cpp                                                   |   1
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp                              |  29
-rw-r--r--  src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp   | 153
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                                    |  46
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp                                    |   8
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                                 |   4
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp                              |  38
7 files changed, 265 insertions, 14 deletions
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 47a6f60534..f62ce92921 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -58,6 +58,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::StridedSlice: return "StridedSlice";
case LayerType::Subtraction: return "Subtraction";
case LayerType::Switch: return "Switch";
+ case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
default:
BOOST_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 69f598d288..1a994e7442 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -66,26 +66,27 @@ std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
- unsigned int inBatchSize = inputShape[0];
- unsigned int inWidth = inputShape[dataLayoutIndex.GetWidthIndex()];
- unsigned int inHeight = inputShape[dataLayoutIndex.GetHeightIndex()];
- unsigned int inChannels = inputShape[dataLayoutIndex.GetChannelsIndex()];
+ const unsigned int batches = inputShape[0];
+ const unsigned int channels = inputShape[dataLayoutIndex.GetChannelsIndex()];
- unsigned int kernelWidth = kernelShape[dataLayoutIndex.GetWidthIndex()];
- unsigned int kernelHeight = kernelShape[dataLayoutIndex.GetHeightIndex()];
+ const unsigned int wInput = inputShape[dataLayoutIndex.GetWidthIndex()];
+ const unsigned int hInput = inputShape[dataLayoutIndex.GetHeightIndex()];
- unsigned int totalPaddingX = m_Param.m_PadLeft + m_Param.m_PadRight;
- unsigned int totalPaddingY = m_Param.m_PadTop + m_Param.m_PadBottom;
+ const unsigned int wKernel = kernelShape[dataLayoutIndex.GetWidthIndex()];
+ const unsigned int hKernel = kernelShape[dataLayoutIndex.GetHeightIndex()];
- unsigned int outWidth = m_Param.m_StrideX * (inWidth + 1) - totalPaddingX + kernelWidth;
- unsigned int outHeight = m_Param.m_StrideY * (inHeight + 1) - totalPaddingY + kernelHeight;
+ const unsigned int wStridedInput = 1u + m_Param.m_StrideX * (wInput - 1);
+ const unsigned int hStridedInput = 1u + m_Param.m_StrideY * (hInput - 1);
- unsigned int outChannels = inChannels;
- unsigned int outBatchSize = inBatchSize;
+ const unsigned int wPaddedOutput = wStridedInput + wKernel - (wKernel % 2);
+ const unsigned int hPaddedOutput = hStridedInput + hKernel - (hKernel % 2);
+
+ unsigned int wOutput = wPaddedOutput - (m_Param.m_PadLeft + m_Param.m_PadRight);
+ unsigned int hOutput = hPaddedOutput - (m_Param.m_PadTop + m_Param.m_PadBottom);
TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
- TensorShape( { outBatchSize, outHeight, outWidth, outChannels } ) :
- TensorShape( { outBatchSize, outChannels, outHeight, outWidth });
+ TensorShape( { batches, hOutput, wOutput, channels } ) :
+ TensorShape( { batches, channels, hOutput, wOutput });
return std::vector<TensorShape>({ tensorShape });
}
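
As a quick check of the corrected shape formula, the configuration used by the new end-to-end test (3x3 input, 3x3 kernel, stride 2, padding 1 on all sides) can be plugged in by hand. The following is a minimal standalone sketch, not code from the patch itself; the values are taken from the test further down:

    // Output width for the end-to-end test configuration:
    // 3x3 input, 3x3 kernel, stride 2, padding 1 left and right.
    const unsigned int wInput  = 3u;
    const unsigned int wKernel = 3u;
    const unsigned int strideX = 2u;
    const unsigned int padLeft = 1u, padRight = 1u;

    const unsigned int wStridedInput = 1u + strideX * (wInput - 1);             // 1 + 2 * 2 = 5
    const unsigned int wPaddedOutput = wStridedInput + wKernel - (wKernel % 2); // 5 + 3 - 1 = 7
    const unsigned int wOutput       = wPaddedOutput - (padLeft + padRight);    // 7 - 2 = 5

This matches the 5x5 output the test expects; for comparison, the replaced formula, strideX * (wInput + 1) - (padLeft + padRight) + wKernel, gives 9 for the same configuration.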
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..9d6312ea53
--- /dev/null
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -0,0 +1,153 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <Permute.hpp>
+#include <ResolveType.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+INetworkPtr CreateTransposeConvolution2dNetwork(const armnn::TransposeConvolution2dDescriptor& descriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::ConstTensor& weights,
+ const armnn::Optional<armnn::ConstTensor>& biases)
+{
+ using namespace armnn;
+
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* input = network->AddInputLayer(0, "input");
+ IConnectableLayer* transposeConvolution2d =
+ network->AddTransposeConvolution2dLayer(descriptor, weights, biases, "transposeConvolution2d");
+ IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+ Connect(input, transposeConvolution2d, inputInfo, 0, 0);
+ Connect(transposeConvolution2d, output, outputInfo, 0, 0);
+
+ return network;
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
+ armnn::DataLayout dataLayout)
+{
+ using namespace armnn;
+ using T = ResolveType<ArmnnType>;
+
+ constexpr unsigned int batches = 1u;
+ constexpr unsigned int channels = 1u;
+
+ constexpr unsigned int wInput = 3u;
+ constexpr unsigned int hInput = wInput;
+
+ constexpr unsigned int wOutput = 5u;
+ constexpr unsigned int hOutput = wOutput;
+
+ constexpr unsigned int wWeights = 3u;
+ constexpr unsigned int hWeights = wWeights;
+
+ TensorShape inputShape = MakeTensorShape(batches, channels, hInput, wInput, dataLayout);
+ TensorShape outputShape = MakeTensorShape(batches, channels, hOutput, wOutput, dataLayout);
+ TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, dataLayout);
+
+ const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
+ const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
+
+ TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+ TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
+ TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset);
+ TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0);
+
+ std::vector<float> inputData =
+ {
+ 1.f, 1.f, 1.f,
+ 1.f, 1.f, 1.f,
+ 1.f, 1.f, 1.f
+ };
+
+ std::vector<float> weightsData =
+ {
+ 1.f, 2.f, 3.f,
+ 4.f, 5.f, 6.f,
+ 7.f, 8.f, 9.f
+ };
+
+ std::vector<float> biasesData = { 1.f };
+
+ std::vector<float> expectedOutputData =
+ {
+ 6.f, 11.f, 6.f, 11.f, 6.f,
+ 11.f, 21.f, 11.f, 21.f, 11.f,
+ 6.f, 11.f, 6.f, 11.f, 6.f,
+ 11.f, 21.f, 11.f, 21.f, 11.f,
+ 6.f, 11.f, 6.f, 11.f, 6.f
+ };
+
+ TransposeConvolution2dDescriptor descriptor;
+ descriptor.m_PadLeft = 1;
+ descriptor.m_PadRight = 1;
+ descriptor.m_PadTop = 1;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_BiasEnabled = true;
+ descriptor.m_DataLayout = dataLayout;
+
+ // swizzle data if needed
+ if (dataLayout == armnn::DataLayout::NHWC)
+ {
+ constexpr size_t dataTypeSize = sizeof(float);
+ const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
+
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
+ inputData = tmp;
+
+ tmp.resize(weightsData.size());
+ armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
+ weightsData = tmp;
+
+ tmp.resize(expectedOutputData.size());
+ armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, expectedOutputData.data(), tmp.data(), dataTypeSize);
+ expectedOutputData = tmp;
+ }
+
+ // quantize data
+ std::vector<T> qInputData = QuantizedVector<T>(qScale, qOffset, inputData);
+ std::vector<T> qWeightsData = QuantizedVector<T>(qScale, qOffset, weightsData);
+ std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+
+ using BT = ResolveType<ArmnnBType>;
+ std::vector<BT> qBiasesData = QuantizedVector<BT>(qScale * qScale, 0, biasesData);
+
+ ConstTensor weights(weightsInfo, qWeightsData);
+ ConstTensor biases(biasesInfo, qBiasesData);
+
+ INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
+ inputInfo,
+ outputInfo,
+ weights,
+ Optional<ConstTensor>(biases));
+
+
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+ { { 0, qInputData } },
+ { { 0, qExpectedOutputData } },
+ backends);
+}
\ No newline at end of file
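
The expected output values in the test can be checked by hand, since every input element is 1.0 and the bias is 1.0: with stride 2 and padding 1, output positions that coincide with an input element receive only the kernel centre, 5 + 1 = 6; positions midway between two inputs receive a pair of kernel weights summing to 10 (4 + 6 horizontally, 2 + 8 vertically), giving 11; and positions surrounded by four inputs receive the four corner weights, 1 + 3 + 7 + 9 = 20, giving 21. This is an informal check of the test data rather than part of the patch.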
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 06d9e1bff9..429993a55f 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1466,4 +1466,50 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(descriptor);
+
+ bool supported = true;
+
+ std::array<DataType,3> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference TransposeConvolution2d: input is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference TransposeConvolution2d: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
+ "Reference TransposeConvolution2d: weights is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "Reference TransposeConvolution2d: input and output types mismatched.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
+ "Reference TransposeConvolution2d: input and weights types mismatched.");
+
+ if (biases.has_value())
+ {
+ std::array<DataType,2> biasesSupportedTypes = {
+ DataType::Float32,
+ DataType::Signed32
+ };
+ supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
+ "Reference TransposeConvolution2d: biases is not a supported type.");
+ }
+
+ return supported;
+}
+
} // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 5d241492c2..9c397fe66b 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -266,6 +266,14 @@ public:
const TensorInfo& alpha,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsTransposeConvolution2dSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
};
} // namespace armnn
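
For context, the new query can be exercised directly against the reference backend. The snippet below is an illustrative sketch rather than code from the patch; the tensor shapes are chosen arbitrarily and the include paths are assumed:

    #include <armnn/ArmNN.hpp>
    // Assumes RefLayerSupport.hpp is visible on the include path.

    std::string reason;
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo output({ 1, 1, 5, 5 }, armnn::DataType::Float32);
    armnn::TensorInfo weights({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo biases({ 1 }, armnn::DataType::Float32);

    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;

    const bool supported = layerSupport.IsTransposeConvolution2dSupported(
        input, output, descriptor, weights,
        armnn::Optional<armnn::TensorInfo>(biases),
        armnn::Optional<std::string&>(reason));
    // On failure, 'reason' holds one of the messages added in RefLayerSupport.cpp above.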
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 5ede8b3f02..d906f93a38 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -468,6 +468,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
+ if (IsFloat16(info))
+ {
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ }
return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
}
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 4d56952e27..a528a54cd2 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -13,6 +13,7 @@
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
+#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/execution_monitor.hpp>
@@ -930,4 +931,41 @@ BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
+// TransposeConvolution2d
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
+{
+ TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
+ defaultBackends, armnn::DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
+{
+ TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ defaultBackends, armnn::DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
+{
+ TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ defaultBackends, armnn::DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
+{
+ TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
+ defaultBackends, armnn::DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
+{
+ TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ defaultBackends, armnn::DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
+{
+ TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ defaultBackends, armnn::DataLayout::NHWC);
+}
+
BOOST_AUTO_TEST_SUITE_END()