From c42a987aa53d0fd842c34dee90abef5a9ff15fa4 Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Wed, 2 Feb 2022 16:35:09 +0000
Subject: IVGCVSW-6635 Move MemCopyTestImpl from acl to armnnTestUtils.

* Move MemCopyTestImpl.hpp from src/backends/aclCommon/test/ to
  include/armnnTestUtils.
* Refactor MemCopyTests in aclCommon, cl and Neon.
* Introduce RefMemCopyTests to exercise this utility in x86 builds.

Signed-off-by: Colm Donelan
Change-Id: I8824f013d3656658ed0a2904bb79384e3af68641
---
 include/armnnTestUtils/MemCopyTestImpl.hpp       | 115 ++++++++++
 .../armnnTestUtils/MockWorkloadFactoryHelper.hpp |  41 ++++
 include/armnnTestUtils/TensorHelpers.hpp         | 235 +++++++++++++++++++++
 include/armnnUtils/QuantizeHelper.hpp            | 132 ++++++++++++
 4 files changed, 523 insertions(+)
 create mode 100644 include/armnnTestUtils/MemCopyTestImpl.hpp
 create mode 100644 include/armnnTestUtils/MockWorkloadFactoryHelper.hpp
 create mode 100644 include/armnnTestUtils/TensorHelpers.hpp
 create mode 100644 include/armnnUtils/QuantizeHelper.hpp

diff --git a/include/armnnTestUtils/MemCopyTestImpl.hpp b/include/armnnTestUtils/MemCopyTestImpl.hpp
new file mode 100644
index 0000000000..1856dcb056
--- /dev/null
+++ b/include/armnnTestUtils/MemCopyTestImpl.hpp
@@ -0,0 +1,115 @@
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "LayerTestResult.hpp"
#include "TensorCopyUtils.hpp"
#include "TensorHelpers.hpp"
#include "WorkloadTestUtils.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnTestUtils/MockBackend.hpp>
#include <ResolveType.hpp>

namespace
{

template <armnn::DataType dataType, typename T = armnn::ResolveType<dataType>>
LayerTestResult<T, 4> MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory,
                                  armnn::IWorkloadFactory& dstWorkloadFactory,
                                  bool withSubtensors)
{
    const std::array<unsigned int, 4> shapeData = { { 1u, 1u, 6u, 5u } };
    const armnn::TensorShape tensorShape(4, shapeData.data());
    const armnn::TensorInfo tensorInfo(tensorShape, dataType);
    std::vector<T> inputData =
    {
        1, 2, 3, 4, 5,
        6, 7, 8, 9, 10,
        11, 12, 13, 14, 15,
        16, 17, 18, 19, 20,
        21, 22, 23, 24, 25,
        26, 27, 28, 29, 30,
    };

    LayerTestResult<T, 4> ret(tensorInfo);
    ret.m_ExpectedData = inputData;

    std::vector<T> actualOutput(tensorInfo.GetNumElements());

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    auto inputTensorHandle = srcWorkloadFactory.CreateTensorHandle(tensorInfo);
    auto outputTensorHandle = dstWorkloadFactory.CreateTensorHandle(tensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    AllocateAndCopyDataToITensorHandle(inputTensorHandle.get(), inputData.data());
    outputTensorHandle->Allocate();

    armnn::MemCopyQueueDescriptor memCopyQueueDesc;
    armnn::WorkloadInfo workloadInfo;

    const unsigned int origin[4] = {};

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    auto workloadInput = (withSubtensors && srcWorkloadFactory.SupportsSubTensors())
                             ? srcWorkloadFactory.CreateSubTensorHandle(*inputTensorHandle, tensorShape, origin)
                             : std::move(inputTensorHandle);
    auto workloadOutput = (withSubtensors && dstWorkloadFactory.SupportsSubTensors())
                             ? dstWorkloadFactory.CreateSubTensorHandle(*outputTensorHandle, tensorShape, origin)
                             : std::move(outputTensorHandle);
    ARMNN_NO_DEPRECATE_WARN_END

    AddInputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadInput.get());
    AddOutputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadOutput.get());

    dstWorkloadFactory.CreateWorkload(armnn::LayerType::MemCopy, memCopyQueueDesc, workloadInfo)->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), workloadOutput.get());
    ret.m_ActualData = actualOutput;

    return ret;
}

template <typename WorkloadFactoryType>
struct MemCopyTestHelper
{};

template <>
struct MemCopyTestHelper<armnn::MockWorkloadFactory>
{
    static armnn::IBackendInternal::IMemoryManagerSharedPtr GetMemoryManager()
    {
        armnn::MockBackend backend;
        return backend.CreateMemoryManager();
    }

    static armnn::MockWorkloadFactory
        GetFactory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
    {
        IgnoreUnused(memoryManager);
        return armnn::MockWorkloadFactory();
    }
};

using MockMemCopyTestHelper = MemCopyTestHelper<armnn::MockWorkloadFactory>;

template <armnn::DataType dataType,
          typename SrcWorkloadFactory,
          typename DstWorkloadFactory,
          typename T = armnn::ResolveType<dataType>>
LayerTestResult<T, 4> MemCopyTest(bool withSubtensors)
{

    armnn::IBackendInternal::IMemoryManagerSharedPtr srcMemoryManager =
        MemCopyTestHelper<SrcWorkloadFactory>::GetMemoryManager();

    armnn::IBackendInternal::IMemoryManagerSharedPtr dstMemoryManager =
        MemCopyTestHelper<DstWorkloadFactory>::GetMemoryManager();

    SrcWorkloadFactory srcWorkloadFactory = MemCopyTestHelper<SrcWorkloadFactory>::GetFactory(srcMemoryManager);
    DstWorkloadFactory dstWorkloadFactory = MemCopyTestHelper<DstWorkloadFactory>::GetFactory(dstMemoryManager);

    return MemCopyTest<dataType>(srcWorkloadFactory, dstWorkloadFactory, withSubtensors);
}

} // anonymous namespace
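[Editor's note] The commit message's new RefMemCopyTests drive this header by pairing two
workload factories and comparing the copied tensor against the expected data. The sketch
below shows that usage pattern under assumptions: the suite and case names are invented,
and it pairs the mock factory with itself so that only the MemCopyTestHelper
specialisation defined above is needed; a real backend test would pair e.g. a Ref or CL
factory given a matching specialisation.

// Hypothetical usage sketch -- names assumed, not part of this patch.
#include <armnnTestUtils/MemCopyTestImpl.hpp>
#include <doctest/doctest.h>

TEST_SUITE("MemCopyUsageSketch")
{
TEST_CASE("CopyBetweenMockFactories")
{
    // Run a Float32 copy without sub-tensors between two mock factories.
    LayerTestResult<float, 4> result =
        MemCopyTest<armnn::DataType::Float32, armnn::MockWorkloadFactory, armnn::MockWorkloadFactory>(false);

    // CompareTensors comes from TensorHelpers.hpp, pulled in by the header above.
    auto predicateResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                          result.m_ActualShape, result.m_ExpectedShape);
    CHECK_MESSAGE(predicateResult.m_Result, predicateResult.m_Message.str());
}
}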
diff --git a/include/armnnTestUtils/MockWorkloadFactoryHelper.hpp b/include/armnnTestUtils/MockWorkloadFactoryHelper.hpp
new file mode 100644
index 0000000000..e9f2b2f6cf
--- /dev/null
+++ b/include/armnnTestUtils/MockWorkloadFactoryHelper.hpp
@@ -0,0 +1,41 @@
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "MockBackend.hpp"
#include "MockTensorHandleFactory.hpp"

#include <backendsCommon/test/WorkloadFactoryHelper.hpp>

namespace
{

template <>
struct WorkloadFactoryHelper<armnn::MockWorkloadFactory>
{
    static armnn::IBackendInternal::IMemoryManagerSharedPtr GetMemoryManager()
    {
        armnn::MockBackend backend;
        return backend.CreateMemoryManager();
    }

    static armnn::MockWorkloadFactory
        GetFactory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
    {
        IgnoreUnused(memoryManager);
        return armnn::MockWorkloadFactory();
    }

    static armnn::MockTensorHandleFactory
        GetTensorHandleFactory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
    {

        return armnn::MockTensorHandleFactory(
            std::static_pointer_cast<armnn::MockMemoryManager>(memoryManager));
    }
};

using MockWorkloadFactoryHelper = WorkloadFactoryHelper<armnn::MockWorkloadFactory>;

} // anonymous namespace
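[Editor's note] The point of this specialisation is that backend-agnostic test code can
obtain a memory manager, a workload factory and a tensor-handle factory through one
interface. A minimal sketch of that pattern, assuming the primary WorkloadFactoryHelper
template declared in backendsCommon/test; the function name is invented:

// Hypothetical generic setup -- FactoryType is any factory with a helper specialisation.
template <typename FactoryType>
void RunBackendAgnosticSetup()
{
    auto memoryManager  = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
    FactoryType factory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
    auto handleFactory  = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);

    // ... create tensor handles with handleFactory and workloads with factory ...
}

// With this header included, the mock backend slots into the same machinery:
// RunBackendAgnosticSetup<armnn::MockWorkloadFactory>();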
diff --git a/include/armnnTestUtils/TensorHelpers.hpp b/include/armnnTestUtils/TensorHelpers.hpp
new file mode 100644
index 0000000000..ca17e621c3
--- /dev/null
+++ b/include/armnnTestUtils/TensorHelpers.hpp
@@ -0,0 +1,235 @@
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnnTestUtils/PredicateResult.hpp>

#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>

#include <armnnUtils/QuantizeHelper.hpp>

#include <doctest/doctest.h>

#include <array>
#include <cmath>
#include <random>
#include <vector>

constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;

template <typename T, bool isQuantized = true>
struct SelectiveComparer
{
    static bool Compare(T a, T b)
    {
        return (std::max(a, b) - std::min(a, b)) <= 1;
    }

};

template <typename T>
struct SelectiveComparer<T, false>
{
    static bool Compare(T a, T b)
    {
        // If a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead.
        if (a == 0.0f || b == 0.0f)
        {
            return std::abs(a - b) <= g_FloatCloseToZeroTolerance;
        }

        if (std::isinf(a) && a == b)
        {
            return true;
        }

        if (std::isnan(a) && std::isnan(b))
        {
            return true;
        }

        // For unquantized floats we use a tolerance of 1%.
        return armnnUtils::within_percentage_tolerance(a, b);
    }
};

template <typename T>
bool SelectiveCompare(T a, T b)
{
    return SelectiveComparer<T, armnn::IsQuantizedType<T>()>::Compare(a, b);
};

template <typename T>
bool SelectiveCompareBoolean(T a, T b)
{
    return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));
};

template <typename T>
armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
                                      const std::vector<T>& expectedData,
                                      const armnn::TensorShape& actualShape,
                                      const armnn::TensorShape& expectedShape,
                                      bool compareBoolean = false,
                                      bool isDynamic = false)
{
    if (actualData.size() != expectedData.size())
    {
        armnn::PredicateResult res(false);
        res.Message() << "Different data size ["
                      << actualData.size()
                      << "!="
                      << expectedData.size()
                      << "]";
        return res;
    }

    if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions())
    {
        armnn::PredicateResult res(false);
        res.Message() << "Different number of dimensions ["
                      << actualShape.GetNumDimensions()
                      << "!="
                      << expectedShape.GetNumDimensions()
                      << "]";
        return res;
    }

    if (actualShape.GetNumElements() != expectedShape.GetNumElements())
    {
        armnn::PredicateResult res(false);
        res.Message() << "Different number of elements ["
                      << actualShape.GetNumElements()
                      << "!="
                      << expectedShape.GetNumElements()
                      << "]";
        return res;
    }

    unsigned int numberOfDimensions = actualShape.GetNumDimensions();

    if (!isDynamic)
    {
        // Checks they are same shape.
        for (unsigned int i = 0; i < numberOfDimensions; ++i)
        {
            if (actualShape[i] != expectedShape[i])
            {
                armnn::PredicateResult res(false);
                res.Message() << "Different shapes ["
                              << actualShape[i]
                              << "!="
                              << expectedShape[i]
                              << "]";
                return res;
            }
        }
    }

    // Fun iteration over n dimensions.
    std::vector<unsigned int> indices;
    for (unsigned int i = 0; i < numberOfDimensions; i++)
    {
        indices.emplace_back(0);
    }

    std::stringstream errorString;
    int numFailedElements = 0;
    constexpr int maxReportedDifferences = 3;
    unsigned int index = 0;

    // Compare data element by element.
    while (true)
    {
        bool comparison;
        // As 'true' for uint8_t is any non-zero value (1-255), Booleans need a dedicated compare.
        if (compareBoolean)
        {
            comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]);
        }
        else
        {
            comparison = SelectiveCompare(actualData[index], expectedData[index]);
        }

        if (!comparison)
        {
            ++numFailedElements;

            if (numFailedElements <= maxReportedDifferences)
            {
                if (numFailedElements >= 2)
                {
                    errorString << ", ";
                }
                errorString << "[";
                for (unsigned int i = 0; i < numberOfDimensions; ++i)
                {
                    errorString << indices[i];
                    if (i != numberOfDimensions - 1)
                    {
                        errorString << ",";
                    }
                }
                errorString << "]";

                errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")";
            }
        }

        ++indices[numberOfDimensions - 1];
        for (unsigned int i = numberOfDimensions - 1; i > 0; i--)
        {
            if (indices[i] == actualShape[i])
            {
                indices[i] = 0;
                ++indices[i - 1];
            }
        }
        if (indices[0] == actualShape[0])
        {
            break;
        }

        index++;
    }

    armnn::PredicateResult comparisonResult(true);
    if (numFailedElements > 0)
    {
        comparisonResult.SetResult(false);
        comparisonResult.Message() << numFailedElements << " different values at: ";
        if (numFailedElements > maxReportedDifferences)
        {
            errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)";
        }
        comparisonResult.Message() << errorString.str();
    }

    return comparisonResult;
}

template <typename T>
std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
                                unsigned int seed,
                                float min = -10.0f,
                                float max = 10.0f)
{
    std::mt19937 gen(seed);
    std::uniform_real_distribution<float> dist(min, max);

    std::vector<float> init(tensorInfo.GetNumElements());
    for (unsigned int i = 0; i < init.size(); i++)
    {
        init[i] = dist(gen);
    }

    const float qScale = tensorInfo.GetQuantizationScale();
    const int32_t qOffset = tensorInfo.GetQuantizationOffset();

    return armnnUtils::QuantizedVector<T>(init, qScale, qOffset);
}
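[Editor's note] A quick illustration of how these helpers combine in a layer test. The
shape, seed and quantization parameters below are invented for the sketch, and it assumes
only this header is included:

// Illustrative snippet -- values are made up.
void TensorHelpersSketch()
{
    // QAsymmU8 tensor with scale 0.5 and offset 10.
    armnn::TensorInfo info({ 1, 2, 2, 2 }, armnn::DataType::QAsymmU8, 0.5f, 10);

    // Deterministic pseudo-random data, quantized with the tensor's scale/offset.
    std::vector<uint8_t> expected = MakeRandomTensor<uint8_t>(info, /*seed=*/42);
    std::vector<uint8_t> actual = expected; // In a real test: data read back from a workload.

    // QAsymmU8 is a quantized type, so SelectiveCompare lets elements differ by at most 1.
    armnn::PredicateResult result = CompareTensors(actual, expected, info.GetShape(), info.GetShape());
    // result.m_Result == true; on failure, result.m_Message lists up to 3 differing indices.
}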
diff --git a/include/armnnUtils/QuantizeHelper.hpp b/include/armnnUtils/QuantizeHelper.hpp
new file mode 100644
index 0000000000..231b8411cb
--- /dev/null
+++ b/include/armnnUtils/QuantizeHelper.hpp
@@ -0,0 +1,132 @@
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/TypesUtils.hpp>

#include <BFloat16.hpp>
#include <Half.hpp>

#include <initializer_list>
#include <iterator>
#include <vector>

namespace armnnUtils
{

template <typename T, bool DoQuantize = true>
struct SelectiveQuantizer
{
    static T Quantize(float value, float scale, int32_t offset)
    {
        return armnn::Quantize<T>(value, scale, offset);
    }

    static float Dequantize(T value, float scale, int32_t offset)
    {
        return armnn::Dequantize(value, scale, offset);
    }
};

template <typename T>
struct SelectiveQuantizer<T, false>
{
    static T Quantize(float value, float scale, int32_t offset)
    {
        armnn::IgnoreUnused(scale, offset);
        return value;
    }

    static float Dequantize(T value, float scale, int32_t offset)
    {
        armnn::IgnoreUnused(scale, offset);
        return value;
    }
};

template <>
struct SelectiveQuantizer<armnn::Half, false>
{
    static armnn::Half Quantize(float value, float scale, int32_t offset)
    {
        armnn::IgnoreUnused(scale, offset);
        return armnn::Half(value);
    }

    static float Dequantize(armnn::Half value, float scale, int32_t offset)
    {
        armnn::IgnoreUnused(scale, offset);
        return value;
    }
};

template <>
struct SelectiveQuantizer<armnn::BFloat16, false>
{
    static armnn::BFloat16 Quantize(float value, float scale, int32_t offset)
    {
        armnn::IgnoreUnused(scale, offset);
        return armnn::BFloat16(value);
    }

    static float Dequantize(armnn::BFloat16 value, float scale, int32_t offset)
    {
        armnn::IgnoreUnused(scale, offset);
        return value;
    }
};

template <typename T>
T SelectiveQuantize(float value, float scale, int32_t offset)
{
    return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Quantize(value, scale, offset);
};

template <typename T>
float SelectiveDequantize(T value, float scale, int32_t offset)
{
    return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Dequantize(value, scale, offset);
};

template <typename ItType>
struct IsFloatingPointIterator
{
    static constexpr bool value =
        std::is_floating_point<typename std::iterator_traits<ItType>::value_type>::value;
};

template <typename T, typename FloatIt,
          typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type = 0 // Makes sure fp iterator is valid.
>
std::vector<T> QuantizedVector(FloatIt first, FloatIt last, float qScale, int32_t qOffset)
{
    std::vector<T> quantized;
    quantized.reserve(armnn::numeric_cast<size_t>(std::distance(first, last)));

    for (auto it = first; it != last; ++it)
    {
        auto f = *it;
        T q = SelectiveQuantize<T>(f, qScale, qOffset);
        quantized.push_back(q);
    }

    return quantized;
}

template <typename T>
std::vector<T> QuantizedVector(const std::vector<float>& array, float qScale = 1.f, int32_t qOffset = 0)
{
    return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset);
}

template <typename T>
std::vector<T> QuantizedVector(std::initializer_list<float> array, float qScale = 1.f, int32_t qOffset = 0)
{
    return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset);
}

} // namespace armnnUtils
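[Editor's note] To make the two quantization paths concrete, here is a small sketch; the
scale, offset and input values are arbitrary example numbers, and the function name is
invented:

// Example values are invented for illustration.
#include <armnnUtils/QuantizeHelper.hpp>

void QuantizeHelperSketch()
{
    // Quantized target type: each float f maps to round(f / scale) + offset,
    // saturated to the range of uint8_t. With scale 0.5 and offset 10:
    // -1.0 -> 8, 0.0 -> 10, 0.5 -> 11, 1.0 -> 12.
    std::vector<uint8_t> q =
        armnnUtils::QuantizedVector<uint8_t>({ -1.0f, 0.0f, 0.5f, 1.0f }, /*qScale=*/0.5f, /*qOffset=*/10);

    // Non-quantized target type: SelectiveQuantizer<T, false> ignores scale and
    // offset, so the values pass through unchanged.
    std::vector<float> f = armnnUtils::QuantizedVector<float>({ -1.0f, 0.0f, 0.5f, 1.0f });
}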
--
cgit v1.2.1