diff options
Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/EndToEndTest.cpp  | 33
-rw-r--r--  src/armnn/test/QuantizerTest.cpp | 14
-rw-r--r--  src/armnn/test/TensorHelpers.hpp | 28
3 files changed, 22 insertions, 53 deletions
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index d25e197f63..df84be4277 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -7,8 +7,6 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
-
 #include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
@@ -16,37 +14,6 @@
 BOOST_AUTO_TEST_SUITE(EndToEnd)
 
-namespace
-{
-
-template<typename T>
-bool IsFloatIterFunc(T iter)
-{
-    boost::ignore_unused(iter);
-    return IsFloatingPointIterator<T>::value;
-}
-
-} //namespace
-
-BOOST_AUTO_TEST_CASE(QuantizedHelper)
-{
-    std::vector<float> fArray;
-    BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true);
-    BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true);
-
-    std::vector<double> dArray;
-    BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true);
-
-    std::vector<int> iArray;
-    BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false);
-
-    float floats[5];
-    BOOST_TEST(IsFloatIterFunc(&floats[0]) == true);
-
-    int ints[5];
-    BOOST_TEST(IsFloatIterFunc(&ints[0]) == false);
-}
-
 BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
 {
     using namespace armnn;
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 90fd5e90dd..3f57ce83b2 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -4,17 +4,19 @@
 //
 
 #include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
 #include <armnn/Tensor.hpp>
-#include <armnnQuantizer/INetworkQuantizer.hpp>
 #include <armnn/Types.hpp>
-#include "armnn/LayerVisitorBase.hpp"
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+
+#include <QuantizeHelper.hpp>
+
 #include "../Graph.hpp"
 #include "../Network.hpp"
 #include "../NetworkQuantizerUtils.hpp"
 #include "../OverrideInputRangeVisitor.hpp"
 #include "../RangeTracker.hpp"
-#include "../backends/backendsCommon/test/QuantizeHelper.hpp"
 #include "../../armnnQuantizer/CommandLineProcessor.hpp"
 
 #include <boost/test/unit_test.hpp>
@@ -2294,9 +2296,9 @@ std::vector<uint8_t> SetupQuantize(float value)
     std::vector<float> input({ value, 0.0f, 0.0f, 1.0f });
     const std::vector<float> &inputRef = input;
 
-    auto output = QuantizedVector<uint8_t>(inputInfo.GetQuantizationScale(),
-                                           inputInfo.GetQuantizationOffset(),
-                                           inputRef);
+    auto output = armnnUtils::QuantizedVector<uint8_t>(inputRef,
+                                                       inputInfo.GetQuantizationScale(),
+                                                       inputInfo.GetQuantizationOffset());
 
     return output;
 }
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 35e471e0f2..3f8589353c 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -4,23 +4,21 @@
 //
 
 #pragma once
 
-#include <armnn/TensorFwd.hpp>
-#include <boost/test/unit_test.hpp>
-#include <boost/multi_array.hpp>
-#include <vector>
-#include <array>
+#include <armnn/Tensor.hpp>
+
+#include <QuantizeHelper.hpp>
 
 #include <boost/assert.hpp>
-#include <boost/test/tools/floating_point_comparison.hpp>
+#include <boost/multi_array.hpp>
+#include <boost/numeric/conversion/cast.hpp>
 #include <boost/random/uniform_real_distribution.hpp>
 #include <boost/random/mersenne_twister.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
-#include <armnn/Tensor.hpp>
-
-#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <boost/test/tools/floating_point_comparison.hpp>
+#include <boost/test/unit_test.hpp>
+#include <array>
 #include <cmath>
+#include <vector>
 
 constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
 
@@ -235,7 +233,9 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
     {
         init[i] = dist(gen);
     }
-    float qScale = tensorInfo.GetQuantizationScale();
-    int32_t qOffset = tensorInfo.GetQuantizationOffset();
-    return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init));
+
+    const float qScale = tensorInfo.GetQuantizationScale();
+    const int32_t qOffset = tensorInfo.GetQuantizationOffset();
+
+    return MakeTensor<T, n>(tensorInfo, armnnUtils::QuantizedVector<T>(init, qScale, qOffset));
 }