aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRob Hughes <robert.hughes@arm.com>2019-12-16 17:10:51 +0000
committerDerek Lamberti <derek.lamberti@arm.com>2019-12-31 09:54:07 +0000
commitfc6bf05e536ee352a1b304c6acff36c6b9ea0ead (patch)
tree9b3541d20aa23cfae8be15caaf1e258aa12ea7d6
parent9be61282c8f1fdafa78c1acb33ff13857c6fc543 (diff)
downloadarmnn-fc6bf05e536ee352a1b304c6acff36c6b9ea0ead.tar.gz
Some build fixes for MSVC
Change-Id: I749430918b1268786690c3c8dc9fa2a9542d5d1d Signed-off-by: Robert Hughes <robert.hughes@arm.com>
-rw-r--r--cmake/GlobalConfig.cmake3
-rw-r--r--src/armnn/TypesUtils.cpp22
-rw-r--r--src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp66
3 files changed, 52 insertions, 39 deletions
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index dd932d54bb..e8b57d7526 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -57,7 +57,8 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
if(COMPILER_IS_GNU_LIKE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP")
+ # Disable C4996 (use of deprecated identifier) due to https://developercommunity.visualstudio.com/content/problem/252574/deprecated-compilation-warning-for-virtual-overrid.html
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP /wd4996")
add_definitions(-DNOMINMAX=1 -DNO_STRICT=1)
endif()
if("${CMAKE_SYSTEM_NAME}" STREQUAL Android)
diff --git a/src/armnn/TypesUtils.cpp b/src/armnn/TypesUtils.cpp
index 83c56c491c..f4f857f67a 100644
--- a/src/armnn/TypesUtils.cpp
+++ b/src/armnn/TypesUtils.cpp
@@ -7,6 +7,26 @@
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
+namespace
+{
+/// Workaround for std::isnan() not being implemented correctly for integral types in MSVC.
+/// https://stackoverflow.com/a/56356405
+/// @{
+template <typename T, typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
+inline int IsNan(T x)
+{
+ // The spec defines integral types to be handled as if they were cast to doubles.
+ return std::isnan(static_cast<double>(x));
+}
+
+template <typename T, typename std::enable_if<!std::is_integral<T>::value, T>::type * = nullptr>
+inline int IsNan(T x)
+{
+ return std::isnan(x);
+}
+/// @}
+} // anonymous namespace
+
template<typename QuantizedType>
QuantizedType armnn::Quantize(float value, float scale, int32_t offset)
{
@@ -28,7 +48,7 @@ float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
{
static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
+ BOOST_ASSERT(!IsNan(value));
float dequantized = boost::numeric_cast<float>(value - offset) * scale;
return dequantized;
}
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 9a305bf361..acca01b14a 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -128,20 +128,18 @@ struct ParserFlatbuffersFixture
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This assumes the network has a single input and a single output.
template <std::size_t NumOutputDimensions,
- armnn::DataType ArmnnType,
- typename DataType = armnn::ResolveType<ArmnnType>>
+ armnn::DataType ArmnnType>
void RunTest(size_t subgraphId,
- const std::vector<DataType>& inputData,
- const std::vector<DataType>& expectedOutputData);
+ const std::vector<armnn::ResolveType<ArmnnType>>& inputData,
+ const std::vector<armnn::ResolveType<ArmnnType>>& expectedOutputData);
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
template <std::size_t NumOutputDimensions,
- armnn::DataType ArmnnType,
- typename DataType = armnn::ResolveType<ArmnnType>>
+ armnn::DataType ArmnnType>
void RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType>>& inputData,
- const std::map<std::string, std::vector<DataType>>& expectedOutputData);
+ const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType>>>& inputData,
+ const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType>>>& expectedOutputData);
/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
/// Executes the network with the given input tensors and checks the results against the given output tensors.
@@ -149,12 +147,10 @@ struct ParserFlatbuffersFixture
/// the input datatype to be different to the output
template <std::size_t NumOutputDimensions,
armnn::DataType ArmnnType1,
- armnn::DataType ArmnnType2,
- typename DataType1 = armnn::ResolveType<ArmnnType1>,
- typename DataType2 = armnn::ResolveType<ArmnnType2>>
+ armnn::DataType ArmnnType2>
void RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType1>>& inputData,
- const std::map<std::string, std::vector<DataType2>>& expectedOutputData);
+ const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
+ const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData);
/// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
@@ -162,12 +158,10 @@ struct ParserFlatbuffersFixture
/// This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for
/// the input datatype to be different to the output
template<armnn::DataType ArmnnType1,
- armnn::DataType ArmnnType2,
- typename DataType1 = armnn::ResolveType<ArmnnType1>,
- typename DataType2 = armnn::ResolveType<ArmnnType2>>
+ armnn::DataType ArmnnType2>
void RunTest(std::size_t subgraphId,
- const std::map<std::string, std::vector<DataType1>>& inputData,
- const std::map<std::string, std::vector<DataType2>>& expectedOutputData);
+ const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
+ const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData);
static inline std::string GenerateDetectionPostProcessJsonString(
const armnn::DetectionPostProcessDescriptor& descriptor)
@@ -224,11 +218,10 @@ struct ParserFlatbuffersFixture
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This overload assumes the network has a single input and a single output.
template <std::size_t NumOutputDimensions,
- armnn::DataType armnnType,
- typename DataType>
+ armnn::DataType armnnType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
- const std::vector<DataType>& inputData,
- const std::vector<DataType>& expectedOutputData)
+ const std::vector<armnn::ResolveType<armnnType>>& inputData,
+ const std::vector<armnn::ResolveType<armnnType>>& expectedOutputData)
{
RunTest<NumOutputDimensions, armnnType>(subgraphId,
{ { m_SingleInputName, inputData } },
@@ -239,11 +232,10 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
template <std::size_t NumOutputDimensions,
- armnn::DataType armnnType,
- typename DataType>
+ armnn::DataType armnnType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType>>& inputData,
- const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+ const std::map<std::string, std::vector<armnn::ResolveType<armnnType>>>& inputData,
+ const std::map<std::string, std::vector<armnn::ResolveType<armnnType>>>& expectedOutputData)
{
RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
}
@@ -254,13 +246,13 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
/// the input datatype to be different to the output
template <std::size_t NumOutputDimensions,
armnn::DataType armnnType1,
- armnn::DataType armnnType2,
- typename DataType1,
- typename DataType2>
+ armnn::DataType armnnType2>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType1>>& inputData,
- const std::map<std::string, std::vector<DataType2>>& expectedOutputData)
+ const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
+ const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData)
{
+ using DataType2 = armnn::ResolveType<armnnType2>;
+
// Setup the armnn input tensors from the given vectors.
armnn::InputTensors inputTensors;
for (auto&& it : inputData)
@@ -308,13 +300,13 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
/// This overload supports multiple inputs and multiple outputs, identified by name along with the allowance for
/// the input datatype to be different to the output.
template <armnn::DataType armnnType1,
- armnn::DataType armnnType2,
- typename DataType1,
- typename DataType2>
+ armnn::DataType armnnType2>
void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
- const std::map<std::string, std::vector<DataType1>>& inputData,
- const std::map<std::string, std::vector<DataType2>>& expectedOutputData)
+ const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
+ const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData)
{
+ using DataType2 = armnn::ResolveType<armnnType2>;
+
// Setup the armnn input tensors from the given vectors.
armnn::InputTensors inputTensors;
for (auto&& it : inputData)
@@ -345,7 +337,7 @@ void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
// Checks the results.
for (auto&& it : expectedOutputData)
{
- std::vector<DataType2> out = outputStorage.at(it.first);
+ std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
{
for (unsigned int i = 0; i < out.size(); ++i)
{