From 46c09d0094b708c70bb4770693c9e704b1fbfeb1 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 28 May 2019 08:15:28 +0100
Subject: IVGCVSW-2970 Support QSymm16 for FullyConnected workloads

 * Add support for QSymm16 for FullyConnected
 * Add templating to Uint8 RefLayerTest to test QSymm16

Change-Id: Ie6e989daf2ca966d6c6805b8017126eb77ebfec4
Signed-off-by: Francis Murtagh
---
 src/backends/backendsCommon/WorkloadData.cpp        |  17 +++
 src/backends/backendsCommon/WorkloadFactory.cpp     |  21 +--
 .../backendsCommon/test/FullyConnectedTestImpl.hpp  | 119 -----------------
 src/backends/backendsCommon/test/LayerTests.hpp     | 145 ++++++++++++++++++++-
 .../backendsCommon/test/WorkloadTestUtils.hpp       |  22 ++++
 src/backends/cl/test/ClLayerTests.cpp               |   4 +-
 src/backends/neon/test/NeonLayerTests.cpp           |   4 +-
 src/backends/reference/RefLayerSupport.cpp          |  82 ++++++++++--
 .../reference/test/RefCreateWorkloadTests.cpp       |   5 +
 src/backends/reference/test/RefLayerTests.cpp       |   8 +-
 10 files changed, 269 insertions(+), 158 deletions(-)

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index a1c74df17b..d9779e4e37 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -511,6 +511,23 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
                                          workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor",
                                          "input", "weights", "output");
+
+    // Check the supported data types
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "FullyConnectedQueueDescriptor");
+
+    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
+                      "FullyConnectedQueueDescriptor");
 }
 
 //---------------------------------------------------------------
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 0490a94864..763107123f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -40,26 +41,6 @@ const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> typ
     return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
 }
 
-Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
-{
-    if (!weightsType)
-    {
-        return weightsType;
-    }
-
-    switch(weightsType.value())
-    {
-        case DataType::Float16:
-        case DataType::Float32:
-            return weightsType;
-        case DataType::QuantisedAsymm8:
-            return DataType::Signed32;
-        default:
-            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
-    }
-    return EmptyOptional();
-}
-
 } // anonymous namespace
 
 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
diff --git a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
index 3e6223ab79..402a3e6d51 100644
--- a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
@@ -5,61 +5,8 @@
 #include
 
 #include "WorkloadTestUtils.hpp"
-
 #include
 
-template<typename T, typename B>
-LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::TensorInfo inputTensorInfo,
-    armnn::TensorInfo outputTensorInfo,
-    armnn::TensorInfo weightsDesc,
-    armnn::TensorInfo biasesDesc,
-    boost::multi_array<T, 2>& weights,
-    boost::multi_array<B, 1>& bias,
-    boost::multi_array<T, 4>& input,
-    bool biasEnabled,
-    bool transposeWeights)
-{
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::FullyConnectedQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
-    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
-
-    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
-
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    data.m_Weight = &weightsTensor;
-    data.m_Bias = &biasTensor;
-    data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
-    LayerTestResult<T, 2> result(outputTensorInfo);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    ExecuteWorkload(*workload, memoryManager);
-
-    if (workloadFactory.GetBackendId() == armnn::Compute::CpuRef)
-    {
-        workload->PostAllocationConfigure();
-        workload->Execute();
-    }
-
-    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
-    return result;
-}
-
 LayerTestResult<float, 2> FullyConnectedFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -155,72 +102,6 @@ LayerTestResult<float, 2> FullyConnectedFloat32Test(
     return result;
 }
 
-LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool biasEnabled)
-{
-    constexpr static unsigned int inputWidth = 3u;
-    constexpr static unsigned int inputHeight = 2u;
-    constexpr static unsigned int inputChannels = 1u;
-
-    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
-
-    constexpr static unsigned int outputChannels = 2u;
-
-    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(0.1f);
-    inputTensorInfo.SetQuantizationOffset(63);
-
-    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(5.f);
-    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
-
-    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
-    weightsDesc.SetQuantizationScale(0.2f);
-    weightsDesc.SetQuantizationOffset(93);
-
-    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
-    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
-    biasesDesc.SetQuantizationOffset(0);
-
-    LayerTestResult<uint8_t, 2> result(outputTensorInfo);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
-                                                                              251, 8, 92});
-
-    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
-                                                                            210, 145, 23, 74, 34, 150});
-
-    // scale = 0.02
-    // offset = 0
-    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
-
-    result = SimpleFullyConnectedTestImpl<uint8_t>(
-        workloadFactory,
-        memoryManager,
-        inputTensorInfo, outputTensorInfo,
-        weightsDesc, biasesDesc,
-        weights, bias, input,
-        biasEnabled, true
-    );
-
-    // Manually calculated.
-    // Note one of these values has been clamped to 0.
-    if (biasEnabled)
-    {
-        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
-    }
-    else
-    {
-        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
-    }
-
-    return result;
-}
-
-
-
 //
 // ArmNN variant of the AndroidNN fully_connected_float_large test.
 //
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 9e57ec58a3..f8f50366eb 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -8,9 +8,15 @@
 #include
 #include
 
+#include "TensorCopyUtils.hpp"
+#include "WorkloadTestUtils.hpp"
+#include
 #include
 #include
+#include
+#include
+#include
 
 #include
 #include
@@ -793,7 +799,8 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8Test(
     float upperBound,
     float lowerBound);
 
-LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> FullyConnectedTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled);
@@ -1638,3 +1645,139 @@ LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
 LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<typename T, typename B>
+LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::TensorInfo inputTensorInfo,
+    armnn::TensorInfo outputTensorInfo,
+    armnn::TensorInfo weightsDesc,
+    armnn::TensorInfo biasesDesc,
+    boost::multi_array<T, 2>& weights,
+    boost::multi_array<B, 1>& bias,
+    boost::multi_array<T, 4>& input,
+    bool biasEnabled,
+    bool transposeWeights)
+{
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FullyConnectedQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> ConvertToDataType(const std::vector<float>& input,
+                                 const armnn::TensorInfo& inputTensorInfo)
+{
+    std::vector<T> output(input.size());
+    auto outputTensorInfo = inputTensorInfo;
+    outputTensorInfo.SetDataType(ArmnnType);
+
+    std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
+    armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+
+    for (auto it = input.begin(); it != input.end(); ++it)
+    {
+        rOutputEncoder.Set(*it);
+        ++rOutputEncoder;
+    }
+    return output;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> FullyConnectedTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool biasEnabled)
+{
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
+
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+        {
+            -1.2f, 6.1f, -3.5f,
+            18.8f, -5.5f, 2.9f
+        },
+        inputTensorInfo));
+
+    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+        {
+            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
+            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
+        },
+        weightsDesc));
+
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+    result = SimpleFullyConnectedTestImpl<T>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, true
+    );
+
+    if (biasEnabled)
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
+    }
+
+    return result;
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index 212fea34a6..2f3e786f07 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -86,4 +86,26 @@ inline void ExecuteWorkload(armnn::IWorkload& workload,
     }
 }
 
+inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
+{
+    if (!weightsType)
+    {
+        return weightsType;
+    }
+
+    switch(weightsType.value())
+    {
+        case armnn::DataType::Float16:
+        case armnn::DataType::Float32:
+            return weightsType;
+        case armnn::DataType::QuantisedAsymm8:
+            return armnn::DataType::Signed32;
+        case armnn::DataType::QuantisedSymm16:
+            return armnn::DataType::Signed32;
+        default:
+            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+    }
+    return armnn::EmptyOptional();
+}
+
 } // anonymous namespace
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index fe88b39732..17be230271 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -61,8 +61,8 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwUintTest3)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
 
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 48e411daee..1ea227a14a 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -314,8 +314,8 @@ ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, tr
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
 
 // Add
 ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index b6da628be3..9be1ed6d74 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -12,6 +12,7 @@
 #include
 #include
+#include
 
 #include
 
@@ -108,9 +109,29 @@ struct TypeAnyOf : public Rule
     TypeAnyOf(const TensorInfo& info, const Container& c)
     {
         m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
-        {
-            return dt == info.GetDataType();
-        });
+            {
+                return dt == info.GetDataType();
+            });
+    }
+};
+
+struct BiasAndWeightsTypesMatch : public Rule
+{
+    BiasAndWeightsTypesMatch(const TensorInfo& biases, const TensorInfo& weights)
+    {
+        m_Res = biases.GetDataType() == GetBiasTypeFromWeightsType(weights.GetDataType()).value();
+    }
+};
+
+struct BiasAndWeightsTypesCompatible : public Rule
+{
+    template<typename Container>
+    BiasAndWeightsTypesCompatible(const TensorInfo& info, const Container& c)
+    {
+        m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
+            {
+                return dt == GetBiasTypeFromWeightsType(info.GetDataType()).value();
+            });
     }
 };
 
@@ -569,14 +590,53 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
-    ignore_unused(weights);
-    ignore_unused(biases);
-    ignore_unused(descriptor);
-    return IsSupportedForDataTypeRef(reasonIfUnsupported,
-                                     input.GetDataType(),
-                                     &TrueFunc<>,
-                                     &TrueFunc<>);
+    bool supported = true;
+
+    // Define supported types.
+    std::array<DataType,3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference Fully Connected: input type not supported.");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference Fully Connected: output type not supported.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference Fully Connected: input and output types mismatched.");
+
+    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
+                                  "Reference Fully Connected: weights type not supported.");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
+                                  "Reference Fully Connected: input and weight types mismatched.");
+
+    if (descriptor.m_BiasEnabled)
+    {
+        // Defined supported types for bias
+        std::array<DataType, 2>
+            supportedBiasTypes =
+        {
+            DataType::Float32,
+            DataType::Signed32
+        };
+
+        supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
+                                      "Reference Fully Connected: bias type not supported.");
+
+        supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
+                                      "Reference Fully Connected: bias and weight types mismatch.");
+
+        supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
+                                      "Reference Fully Connected: bias type inferred from weights is incompatible.");
+
+    }
+
+    return supported;
 }
 
 bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 48b85cb9de..95da7abad1 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -327,6 +327,11 @@ BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
 }
 
+BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm16)
+{
+    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
+}
+
 template <typename NormalizationWorkloadType, armnn::DataType DataType>
 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
 {
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index ed8f02f46d..1207c1d648 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -234,11 +234,13 @@ ARMNN_AUTO_TEST_CASE(SquareInt16, SquareInt16Test)
 ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
 
 
-// Fully Conected
+// Fully Connected
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<armnn::DataType::QuantisedSymm16>, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<armnn::DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<armnn::DataType::QuantisedSymm16>, true)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
-- 
cgit v1.2.1
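
Editor's note: the templated `FullyConnectedTest` in this patch feeds the same float reference data through `ConvertToDataType`, which encodes it with each tensor's quantization scale and offset, so one test body covers both QAsymm8 and QSymm16. The standalone sketch below is not part of the patch and does not use the ArmNN `Encoder` API; the `Quantize` helper and `main` are illustrative only, reusing the scale/offset values chosen in the test to show the affine mapping that conversion relies on.

```cpp
// Minimal sketch of affine quantization:
//   real = scale * (quantized - offset)  =>  quantized = round(real / scale) + offset,
// clamped to the numeric range of the target type.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

template <typename QuantizedType>
QuantizedType Quantize(float value, float scale, int32_t offset)
{
    const float lowest  = static_cast<float>(std::numeric_limits<QuantizedType>::lowest());
    const float highest = static_cast<float>(std::numeric_limits<QuantizedType>::max());
    const float q = std::round(value / scale) + static_cast<float>(offset);
    return static_cast<QuantizedType>(std::min(std::max(q, lowest), highest));
}

int main()
{
    // Input tensor parameters used by FullyConnectedTest: scale 0.1f, offset 63.
    std::cout << static_cast<int>(Quantize<uint8_t>(-1.2f, 0.1f, 63)) << "\n"; // 51 (8-bit)
    std::cout << Quantize<int16_t>(18.8f, 0.1f, 63) << "\n";                   // 251 (same mapping, wider 16-bit range)
    return 0;
}
```

Under that mapping the new float reference data reproduces the values of the removed hard-coded Uint8 test: with scale 0.1 and offset 63, -1.2f maps to 51 and 18.8f maps to 251, matching the {51, 124, 28, 251, 8, 92} input used previously.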