diff options
author | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-04-01 16:51:23 +0100 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-04-06 09:06:01 +0100 |
commit | ac2770a4bb6461bfbddec928bb6208f26f898f02 (patch) | |
tree | c72f67f648b7aca2f4bccf69b05d185bf5f9ccad /src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | |
parent | 7ee5d2c3b3cee5a924ed6347fef613ee07b5aca7 (diff) | |
download | armnn-ac2770a4bb6461bfbddec928bb6208f26f898f02.tar.gz |
IVGCVSW-4485 Remove Boost assert
* Change boost assert to armnn assert
* Change include file to armnn assert
* Fix ARMNN_ASSERT_MSG issue with multiple conditions
* Change BOOST_ASSERT to BOOST_TEST where appropriate
* Remove unused include statements
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp')
-rw-r--r-- | src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp index 50ad667dde..c66027efdf 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp @@ -169,9 +169,9 @@ template<typename T, typename B> void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset, const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h) { - BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()), + ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()), "Invalid type and parameter combination."); - BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()), + ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()), "Invalid type and parameter combination."); // Note we need to dequantize and re-quantize the image value and the bias. @@ -183,7 +183,7 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset, for (uint32_t x = 0; x < w; ++x) { uint32_t offset = (i * h + y) * w + x; - BOOST_ASSERT(offset < v.size()); + ARMNN_ASSERT(offset < v.size()); T& outRef = v[offset]; float dOutput = SelectiveDequantize(outRef, vScale, vOffset); outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset); @@ -236,11 +236,11 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl( bool biasEnabled = bias.size() > 0; // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches). - BOOST_ASSERT(inputNum == 1); - BOOST_ASSERT(outputNum == 1); + ARMNN_ASSERT(inputNum == 1); + ARMNN_ASSERT(outputNum == 1); // If a bias is used, its size must equal the number of output channels. 
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels); + ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels); // Note these tensors will use two (identical) batches. @@ -1627,7 +1627,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl( // If a bias is used, its size must equal the number of output channels. bool biasEnabled = bias.size() > 0; - BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels); + ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels); // Creates the tensors. armnn::TensorInfo inputTensorInfo = @@ -2135,11 +2135,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl( bool biasEnabled = bias.size() > 0; // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches). - BOOST_ASSERT(inputNum == 1); - BOOST_ASSERT(outputNum == 1); + ARMNN_ASSERT(inputNum == 1); + ARMNN_ASSERT(outputNum == 1); // If a bias is used, its size must equal the number of output channels. - BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels); + ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels); // Note these tensors will use two (identical) batches. |