aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNikhil Raj <nikhil.raj@arm.com>2021-04-02 22:04:39 +0100
committerNikhil Raj <nikhil.raj@arm.com>2021-04-16 14:27:27 +0100
commit6dd178f2395b34cfb360eabb0130c19ed258f5fa (patch)
tree6814e4729bbd1e652d8b9c18c9db28f3bc2f8a8a
parented7fce413410d15c501ea52f9e6bfbbf71b3daf1 (diff)
downloadarmnn-6dd178f2395b34cfb360eabb0130c19ed258f5fa.tar.gz
IVGCVSW-5720 Remove the Caffe Parser from ArmNN
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com> Change-Id: Ib00be204f549efa9aa5971ecf65c2dec4a10b10f
-rw-r--r--CMakeLists.txt58
-rw-r--r--cmake/AddDllCopyCommands.cmake21
-rw-r--r--cmake/GlobalConfig.cmake15
-rw-r--r--cmake/ParserVersion.cmake16
-rw-r--r--docker/x86_64/Dockerfile15
-rw-r--r--include/armnnCaffeParser/ICaffeParser.hpp66
-rw-r--r--include/armnnCaffeParser/Version.hpp29
-rw-r--r--python/pyarmnn/examples/image_classification/example_utils.py2
-rwxr-xr-xpython/pyarmnn/setup.py1
-rw-r--r--python/pyarmnn/src/pyarmnn/__init__.py12
-rw-r--r--python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i103
-rwxr-xr-xpython/pyarmnn/swig_generate.py1
-rw-r--r--python/pyarmnn/test/test_caffe_parser.py131
-rw-r--r--python/pyarmnn/test/test_generated.py2
-rw-r--r--src/armnnCaffeParser/CaffeParser.cpp2271
-rw-r--r--src/armnnCaffeParser/CaffeParser.hpp189
-rw-r--r--src/armnnCaffeParser/RecordByRecordCaffeParser.cpp731
-rw-r--r--src/armnnCaffeParser/RecordByRecordCaffeParser.hpp53
-rw-r--r--src/armnnCaffeParser/test/TestAdd.cpp70
-rw-r--r--src/armnnCaffeParser/test/TestConcat.cpp73
-rw-r--r--src/armnnCaffeParser/test/TestConvolution.cpp133
-rw-r--r--src/armnnCaffeParser/test/TestDropout.cpp53
-rw-r--r--src/armnnCaffeParser/test/TestInPlace.cpp98
-rw-r--r--src/armnnCaffeParser/test/TestInputs.cpp122
-rw-r--r--src/armnnCaffeParser/test/TestMul.cpp73
-rw-r--r--src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp54
-rw-r--r--src/armnnCaffeParser/test/TestPooling2d.cpp54
-rw-r--r--src/armnnCaffeParser/test/TestSplit.cpp47
-rw-r--r--src/armnnConverter/ArmnnConverter.cpp26
-rw-r--r--tests/CMakeLists.txt76
-rw-r--r--tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp35
-rw-r--r--tests/CaffeAlexNet-Armnn/Validation.txt1000
-rw-r--r--tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp36
-rw-r--r--tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt1000
-rw-r--r--tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp42
-rw-r--r--tests/CaffeInception_BN-Armnn/Validation.txt1000
-rw-r--r--tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp36
-rw-r--r--tests/CaffeMnist-Armnn/Validation.txt1000
-rw-r--r--tests/CaffePreprocessor.cpp44
-rw-r--r--tests/CaffePreprocessor.hpp40
-rw-r--r--tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp45
-rw-r--r--tests/CaffeResNet-Armnn/Validation.txt2000
-rw-r--r--tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp20
-rw-r--r--tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp37
-rw-r--r--tests/CaffeVGG-Armnn/Validation.txt1000
-rw-r--r--tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp58
-rw-r--r--tests/ExecuteNetwork/ExecuteNetwork.cpp14
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkParams.cpp10
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp4
-rw-r--r--tests/ImageTensorGenerator/ImageTensorGenerator.cpp8
-rw-r--r--tests/ImageTensorGenerator/ImageTensorGenerator.hpp9
-rw-r--r--tests/InferenceModel.hpp2
-rw-r--r--tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp8
-rw-r--r--tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp6
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp3
-rw-r--r--tests/TfResNext_Quantized-Armnn/TfResNext_Quantized-Armnn.cpp4
56 files changed, 26 insertions, 12030 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8878065478..c02db3d014 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -99,34 +99,6 @@ list(APPEND armnnUtils_sources
add_library_ex(armnnUtils STATIC ${armnnUtils_sources})
target_include_directories(armnnUtils PRIVATE src/backends)
-if(BUILD_CAFFE_PARSER)
- # ArmNN Parser source files required for all build options
- set(armnn_caffe_parser_sources)
- list(APPEND armnn_caffe_parser_sources
- include/armnnCaffeParser/ICaffeParser.hpp
- include/armnnCaffeParser/Version.hpp
- src/armnnCaffeParser/RecordByRecordCaffeParser.hpp
- src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
- src/armnnCaffeParser/CaffeParser.hpp
- src/armnnCaffeParser/CaffeParser.cpp
- ${CAFFE_GENERATED_SOURCES}/caffe/proto/caffe.pb.cc
- )
- # The generated Caffe protobuf .cc file is not warning clean and we can't fix them.
- if(COMPILER_IS_GNU_LIKE)
- set_source_files_properties(${CAFFE_GENERATED_SOURCES}/caffe/proto/caffe.pb.cc PROPERTIES COMPILE_FLAGS "-Wno-unused-variable -Wno-unused-parameter -Wno-conversion -Wno-sign-conversion")
- endif()
-
- add_library_ex(armnnCaffeParser SHARED ${armnn_caffe_parser_sources})
- set_target_properties(armnnCaffeParser PROPERTIES COMPILE_FLAGS "${CAFFE_PARSER_ADDITIONAL_COMPILE_FLAGS}")
-
- target_include_directories(armnnCaffeParser PRIVATE src/armnnUtils)
-
- target_link_libraries(armnnCaffeParser armnn)
- target_link_libraries(armnnCaffeParser ${PROTOBUF_LIBRARIES})
- set_target_properties(armnnCaffeParser PROPERTIES VERSION ${CAFFE_PARSER_LIB_VERSION} SOVERSION ${CAFFE_PARSER_LIB_SOVERSION})
-
-endif()
-
if(BUILD_ONNX_PARSER)
set(armnn_onnx_parser_sources)
list(APPEND armnn_onnx_parser_sources
@@ -629,11 +601,6 @@ endif()
add_subdirectory(third-party/fmt)
target_link_libraries(armnn fmt)
-if(BUILD_CAFFE_PARSER)
- install(TARGETS armnnCaffeParser
- LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
- RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
-endif()
if(BUILD_ONNX_PARSER)
install(TARGETS armnnOnnxParser
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
@@ -872,21 +839,6 @@ if(BUILD_UNIT_TESTS)
list(APPEND unittest_sources ${CMAKE_CURRENT_BINARY_DIR}/SchemaText.cpp)
endif()
- if(BUILD_CAFFE_PARSER AND ARMNNREF)
- list(APPEND unittest_sources
- src/armnnCaffeParser/test/TestAdd.cpp
- src/armnnCaffeParser/test/TestConcat.cpp
- src/armnnCaffeParser/test/TestConvolution.cpp
- src/armnnCaffeParser/test/TestDropout.cpp
- src/armnnCaffeParser/test/TestInputs.cpp
- src/armnnCaffeParser/test/TestMul.cpp
- src/armnnCaffeParser/test/TestPooling2d.cpp
- src/armnnCaffeParser/test/TestInPlace.cpp
- src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp
- src/armnnCaffeParser/test/TestSplit.cpp
- )
- endif()
-
if(BUILD_ONNX_PARSER AND ARMNNREF)
list(APPEND unittest_sources
src/armnnOnnxParser/test/Addition.cpp
@@ -1026,10 +978,6 @@ if(BUILD_UNIT_TESTS)
target_link_libraries(UnitTests armnnTfLiteParser)
endif()
- if(BUILD_CAFFE_PARSER)
- target_link_libraries(UnitTests armnnCaffeParser)
- endif()
-
if(BUILD_ARMNN_SERIALIZER AND ARMNNREF)
target_include_directories(UnitTests SYSTEM PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src/armnnSerializer)
target_include_directories(UnitTests SYSTEM PRIVATE "${FLATBUFFERS_INCLUDE_PATH}")
@@ -1053,7 +1001,7 @@ if(BUILD_UNIT_TESTS)
addDllCopyCommands(UnitTests)
endif()
-if (BUILD_ARMNN_SERIALIZER AND (BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD_ONNX_PARSER OR BUILD_CAFFE_PARSER) AND ARMNNREF)
+if (BUILD_ARMNN_SERIALIZER AND (BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD_ONNX_PARSER) AND ARMNNREF)
set(ArmnnConverter_sources
src/armnnConverter/ArmnnConverter.cpp)
@@ -1061,10 +1009,6 @@ if (BUILD_ARMNN_SERIALIZER AND (BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD
target_include_directories(ArmnnConverter PRIVATE src/armnn)
target_include_directories(ArmnnConverter PRIVATE src/armnnUtils)
- if(BUILD_CAFFE_PARSER)
- target_link_libraries(ArmnnConverter armnnCaffeParser)
- endif()
-
if(BUILD_ONNX_PARSER)
target_link_libraries(ArmnnConverter armnnOnnxParser)
endif()
diff --git a/cmake/AddDllCopyCommands.cmake b/cmake/AddDllCopyCommands.cmake
index ff57bb173d..6190170dbc 100644
--- a/cmake/AddDllCopyCommands.cmake
+++ b/cmake/AddDllCopyCommands.cmake
@@ -39,14 +39,6 @@ macro(addDllCopyCommands target)
addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnn>/armnn.dll" "$<TARGET_FILE_DIR:armnn>/armnn.dll")
endif()
- # armnnCaffeParser.dll
- if ("armnnCaffeParser" IN_LIST target_deps)
- addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnnCaffeParser>/armnnCaffeParser.dll"
- "$<TARGET_FILE_DIR:armnnCaffeParser>/armnnCaffeParser.dll")
- addDllCopyCommand(${target} "${PROTOBUF_ROOT}/bin/libprotobufd.dll"
- "${PROTOBUF_ROOT}/bin/libprotobuf.dll")
- endif()
-
# armnnTfParser.dll
if ("armnnTfParser" IN_LIST target_deps)
addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnnTfParser>/armnnTfParser.dll"
@@ -60,18 +52,5 @@ macro(addDllCopyCommands target)
addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnnTfLiteParser>/armnnTfLiteParser.dll"
"$<TARGET_FILE_DIR:armnnTfLiteParser>/armnnTfLiteParser.dll")
endif()
-
- # caffe.dll and its dependencies
- listContainsRegex(includeCaffeDlls "${target_deps}" "caffe")
- if (${includeCaffeDlls})
- addDllCopyCommand(${target} "${CAFFE_BUILD_ROOT}/lib/caffe-d.dll"
- "${CAFFE_BUILD_ROOT}/lib/caffe.dll")
- addDllCopyCommand(${target} "${PROTOBUF_ROOT}/bin/libprotobufd.dll"
- "${PROTOBUF_ROOT}/bin/libprotobuf.dll")
- addDllCopyCommand(${target} "${BLAS_ROOT}/bin/libopenblas.dll" "${BLAS_ROOT}/bin/libopenblas.dll")
- addDllCopyCommand(${target} "${MINGW32_ROOT}/bin/libgfortran-3.dll" "${MINGW32_ROOT}/bin/libgfortran-3.dll")
- addDllCopyCommand(${target} "${MINGW32_ROOT}/bin/libgcc_s_dw2-1.dll" "${MINGW32_ROOT}/bin/libgcc_s_dw2-1.dll")
- addDllCopyCommand(${target} "${MINGW32_ROOT}/bin/libquadmath-0.dll" "${MINGW32_ROOT}/bin/libquadmath-0.dll")
- endif()
endif()
endmacro()
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index 8a7c9a1821..031974814b 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -3,7 +3,6 @@
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
#
-option(BUILD_CAFFE_PARSER "Build Caffe parser" OFF)
option(BUILD_TF_PARSER "Build Tensorflow parser" OFF)
option(BUILD_ONNX_PARSER "Build Onnx parser" OFF)
option(BUILD_UNIT_TESTS "Build unit tests" ON)
@@ -159,7 +158,7 @@ endif()
find_dependency(Threads)
# Favour the protobuf passed on command line
-if(BUILD_TF_PARSER OR BUILD_CAFFE_PARSER OR BUILD_ONNX_PARSER)
+if(BUILD_TF_PARSER OR BUILD_ONNX_PARSER)
find_library(PROTOBUF_LIBRARY_DEBUG NAMES "protobufd"
PATHS ${PROTOBUF_ROOT}/lib
NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
@@ -181,15 +180,6 @@ if(BUILD_TF_PARSER OR BUILD_CAFFE_PARSER OR BUILD_ONNX_PARSER)
add_definitions(-DPROTOBUF_USE_DLLS)
endif()
-# Caffe and its dependencies
-if(BUILD_CAFFE_PARSER)
- add_definitions(-DARMNN_CAFFE_PARSER)
-
- find_path(CAFFE_GENERATED_SOURCES "caffe/proto/caffe.pb.h"
- HINTS ${CAFFE_BUILD_ROOT}/include)
- include_directories(SYSTEM "${CAFFE_GENERATED_SOURCES}")
-endif()
-
if(BUILD_TF_PARSER)
add_definitions(-DARMNN_TF_PARSER)
@@ -416,9 +406,6 @@ else()
endif()
-if(NOT BUILD_CAFFE_PARSER)
- message(STATUS "Caffe parser support is disabled")
-endif()
if(NOT BUILD_TF_PARSER)
message(STATUS "Tensorflow parser support is disabled")
diff --git a/cmake/ParserVersion.cmake b/cmake/ParserVersion.cmake
index e7a5234506..2120bf7057 100644
--- a/cmake/ParserVersion.cmake
+++ b/cmake/ParserVersion.cmake
@@ -3,22 +3,6 @@
# SPDX-License-Identifier: MIT
#
-# Read the CaffeParser version components from file
-file(READ ${CMAKE_CURRENT_LIST_DIR}/../include/armnnCaffeParser/Version.hpp caffeVersion)
-
-# Parse the CaffeParser version components
-string(REGEX MATCH "#define CAFFE_PARSER_MAJOR_VERSION ([0-9]*)" _ ${caffeVersion})
-set(CAFFE_PARSER_MAJOR_VERSION ${CMAKE_MATCH_1})
-string(REGEX MATCH "#define CAFFE_PARSER_MINOR_VERSION ([0-9]*)" _ ${caffeVersion})
-set(CAFFE_PARSER_MINOR_VERSION ${CMAKE_MATCH_1})
-
-# Define LIB version
-set(CAFFE_PARSER_LIB_VERSION "${CAFFE_PARSER_MAJOR_VERSION}.${CAFFE_PARSER_MINOR_VERSION}")
-
-# Define LIB soversion
-set(CAFFE_PARSER_LIB_SOVERSION "${CAFFE_PARSER_MAJOR_VERSION}")
-
-
# Read the OnnxParser version components from file
file(READ ${CMAKE_CURRENT_LIST_DIR}/../include/armnnOnnxParser/Version.hpp onnxVersion)
diff --git a/docker/x86_64/Dockerfile b/docker/x86_64/Dockerfile
index c03a1d7407..684b5cf34b 100644
--- a/docker/x86_64/Dockerfile
+++ b/docker/x86_64/Dockerfile
@@ -105,10 +105,6 @@ RUN apt-get update && apt-get install -y \
libatlas-base-dev
-# Download caffe 1.0
-RUN cd $HOME && git clone https://github.com/BVLC/caffe.git && \
- cd caffe && git checkout eeebdab16155d34ff8f5f42137da7df4d1c7eab0
-
# Makefile update
# To Do: Don't copy the Local Make file to docker
# RUN cd $HOME/caffe/ && rm Makefile.config.example
@@ -123,15 +119,6 @@ RUN apt-get update && apt-get install -y \
# ENV PATH=$HOME/armnn-devenv/google/x86_64_pb_install/bin/:$PATH
# ENV LD_LIBRARY_PATH=$HOME/armnn-devenv/google/x86_64_pb_install/lib/:$LD_LIBRARY_PATH
-# Compile CAFFE
-RUN cd $HOME/caffe/ && mkdir build && cd build && \
- export PATH=$HOME/armnn-devenv/google/x86_64_pb_install/bin/:$PATH && \
- export LD_LIBRARY_PATH=$HOME/armnn-devenv/google/x86_64_pb_install/lib/:$LD_LIBRARY_PATH && \
- ldconfig && \
- cmake -DCMAKE_CXX_FLAGS=--std=c++11 ../ && \
- make all -j$(nproc) && \
- make test -j$(nproc) && \
- make runtest -j$(nproc)
# Build Boost library for arm64
RUN cd $HOME && wget http://downloads.sourceforge.net/project/boost/boost/1.64.0/boost_1_64_0.tar.gz && \
@@ -211,8 +198,6 @@ RUN cd $HOME/armnn-devenv && \
-DARMCOMPUTE_BUILD_DIR=$HOME/armnn-devenv/ComputeLibrary/build/ \
-DBOOST_ROOT=$HOME/armnn-devenv/boost_arm64_install/ \
-DARMCOMPUTENEON=1 -DARMCOMPUTECL=1 -DARMNNREF=1 \
- -DCAFFE_GENERATED_SOURCES=$HOME/caffe/build/include \
- -DBUILD_CAFFE_PARSER=1 \
-DONNX_GENERATED_SOURCES=$HOME/armnn-devenv/onnx \
-DBUILD_ONNX_PARSER=1 \
-DTF_GENERATED_SOURCES=$HOME/armnn-devenv/tensorflow-protobuf \
diff --git a/include/armnnCaffeParser/ICaffeParser.hpp b/include/armnnCaffeParser/ICaffeParser.hpp
deleted file mode 100644
index 0e31ad4461..0000000000
--- a/include/armnnCaffeParser/ICaffeParser.hpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "armnn/Types.hpp"
-#include "armnn/NetworkFwd.hpp"
-#include "armnn/Tensor.hpp"
-#include "armnn/INetwork.hpp"
-
-#include <memory>
-#include <map>
-#include <vector>
-
-namespace armnnCaffeParser
-{
-
-using BindingPointInfo = armnn::BindingPointInfo;
-
-class ICaffeParser;
-using ICaffeParserPtr = std::unique_ptr<ICaffeParser, void(*)(ICaffeParser* parser)>;
-
-class ICaffeParser
-{
-public:
- static ICaffeParser* CreateRaw();
- static ICaffeParserPtr Create();
- static void Destroy(ICaffeParser* parser);
-
- /// Create the network from a protobuf text file on the disk.
- armnn::INetworkPtr CreateNetworkFromTextFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs);
-
- /// Create the network from a protobuf binary file on the disk.
- armnn::INetworkPtr CreateNetworkFromBinaryFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs);
-
- /// Create the network directly from protobuf text in a string. Useful for debugging/testin.g
- armnn::INetworkPtr CreateNetworkFromString(
- const char* protoText,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs);
-
- /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
- BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
-
- /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name.
- BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
-
-private:
- friend class CaffeParser;
- friend class RecordByRecordCaffeParser;
-
- ICaffeParser();
- ~ICaffeParser();
-
- class CaffeParserImpl;
- std::unique_ptr<CaffeParserImpl> pCaffeParserImpl;
-};
-
-} \ No newline at end of file
diff --git a/include/armnnCaffeParser/Version.hpp b/include/armnnCaffeParser/Version.hpp
deleted file mode 100644
index 6e7ce5a539..0000000000
--- a/include/armnnCaffeParser/Version.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-namespace armnnCaffeParser
-{
-
-/// Macro utils
-#define STRINGIFY_VALUE(s) STRINGIFY_MACRO(s)
-#define STRINGIFY_MACRO(s) #s
-
-// CaffeParser version components
-#define CAFFE_PARSER_MAJOR_VERSION 24
-#define CAFFE_PARSER_MINOR_VERSION 1
-#define CAFFE_PARSER_PATCH_VERSION 0
-
-/// CAFFE_PARSER_VERSION: "X.Y.Z"
-/// where:
-/// X = Major version number
-/// Y = Minor version number
-/// Z = Patch version number
-#define CAFFE_PARSER_VERSION STRINGIFY_VALUE(CAFFE_PARSER_MAJOR_VERSION) "." \
- STRINGIFY_VALUE(CAFFE_PARSER_MINOR_VERSION) "." \
- STRINGIFY_VALUE(CAFFE_PARSER_PATCH_VERSION)
-
-} //namespace armnnCaffeParser \ No newline at end of file
diff --git a/python/pyarmnn/examples/image_classification/example_utils.py b/python/pyarmnn/examples/image_classification/example_utils.py
index f0ba91e981..bd43d60da5 100644
--- a/python/pyarmnn/examples/image_classification/example_utils.py
+++ b/python/pyarmnn/examples/image_classification/example_utils.py
@@ -72,7 +72,7 @@ def parse_command_line(desc: str = ""):
parser.add_argument("-d", "--data-dir", help="Data directory which contains all the images.",
action="store", default="")
parser.add_argument("-m", "--model-dir",
- help="Model directory which contains the model file (TF, TFLite, ONNX, Caffe).", action="store",
+ help="Model directory which contains the model file (TF, TFLite, ONNX).", action="store",
default="")
return parser.parse_args()
diff --git a/python/pyarmnn/setup.py b/python/pyarmnn/setup.py
index e1eba4b082..9d9f561ef9 100755
--- a/python/pyarmnn/setup.py
+++ b/python/pyarmnn/setup.py
@@ -283,7 +283,6 @@ if __name__ == '__main__':
ext_list.append(pyarmnn_optional_module)
- add_parsers_ext('CaffeParser', extensions_to_build)
add_parsers_ext('OnnxParser', extensions_to_build)
add_parsers_ext('TfParser', extensions_to_build)
add_parsers_ext('TfLiteParser', extensions_to_build)
diff --git a/python/pyarmnn/src/pyarmnn/__init__.py b/python/pyarmnn/src/pyarmnn/__init__.py
index 19b14a4b09..410e66be11 100644
--- a/python/pyarmnn/src/pyarmnn/__init__.py
+++ b/python/pyarmnn/src/pyarmnn/__init__.py
@@ -7,18 +7,6 @@ import logging
from ._generated.pyarmnn_version import GetVersion, GetMajorVersion, GetMinorVersion
# Parsers
-try:
- from ._generated.pyarmnn_caffeparser import ICaffeParser
-except ImportError as err:
- logger = logging.getLogger(__name__)
- message = "Your ArmNN library instance does not support Caffe models parser functionality. "
- logger.warning("%s Skipped ICaffeParser import.", message)
- logger.debug(str(err))
-
-
- def ICaffeParser():
- """In case people try importing without having Arm NN built with this parser."""
- raise RuntimeError(message)
try:
from ._generated.pyarmnn_onnxparser import IOnnxParser
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i
deleted file mode 100644
index 538b486aad..0000000000
--- a/python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i
+++ /dev/null
@@ -1,103 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-%module pyarmnn_caffeparser
-%{
-#define SWIG_FILE_WITH_INIT
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "armnn/INetwork.hpp"
-%}
-
-//typemap definitions and other common stuff
-%include "standard_header.i"
-
-namespace std {
- %template(BindingPointInfo) pair<int, armnn::TensorInfo>;
- %template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
- %template(StringVector) vector<string>;
-}
-
-namespace armnnCaffeParser
-{
-
-%feature("docstring",
-"
-Interface for creating a parser object using Caffe (http://caffe.berkeleyvision.org/) caffemodel files.
-
-Parsers are used to automatically construct Arm NN graphs from model files.
-
-") ICaffeParser;
-
-%nodefaultctor ICaffeParser;
-class ICaffeParser
-{
-public:
- // Documentation
- %feature("docstring",
- "
- Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
-
- Args:
- name (str): Name of the input.
-
- Returns:
- tuple: (`int`, `TensorInfo`)
- ") GetNetworkInputBindingInfo;
-
- %feature("docstring",
- "
- Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name.
-
- Args:
- name (str): Name of the output.
-
- Returns:
- tuple: (`int`, `TensorInfo`)
- ") GetNetworkOutputBindingInfo;
-
- std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(const std::string& name);
- std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(const std::string& name);
-};
-
-%extend ICaffeParser {
- // This is not a substitution of the default constructor of the Armnn class. It tells swig to create custom __init__
- // method for ICaffeParser python object that will use static factory method to do the job.
-
- ICaffeParser() {
- return armnnCaffeParser::ICaffeParser::CreateRaw();
- }
-
- // The following does not replace a real destructor of the Armnn class.
- // It creates a functions that will be called when swig object goes out of the scope to clean resources.
- // so the user doesn't need to call ICaffeParser::Destroy himself.
- // $self` is a pointer to extracted ArmNN ICaffeParser object.
-
- ~ICaffeParser() {
- armnnCaffeParser::ICaffeParser::Destroy($self);
- }
-
- %feature("docstring",
- "
- Create the network from a Caffe caffemodel binary file on disk.
-
- Args:
- graphFile: Path to the caffe model to be parsed.
- inputShapes (tuple): (`string`, `TensorShape`) A tuple containing the input name and TensorShape information for the network.
- requestedOutputs (list): A list of the output tensor names.
-
- Returns:
- INetwork: INetwork object for the parsed Caffe model.
- ") CreateNetworkFromBinaryFile;
-
- %newobject CreateNetworkFromBinaryFile;
- armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs) {
- return $self->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs).release();
- }
-}
-}
-
-// Clear exception typemap.
-%exception;
diff --git a/python/pyarmnn/swig_generate.py b/python/pyarmnn/swig_generate.py
index 72bccbf49e..a2352c9063 100755
--- a/python/pyarmnn/swig_generate.py
+++ b/python/pyarmnn/swig_generate.py
@@ -107,7 +107,6 @@ if __name__ == "__main__":
wrap_names = ['armnn_version',
'armnn',
- 'armnn_caffeparser',
'armnn_onnxparser',
'armnn_tfparser',
'armnn_tfliteparser',
diff --git a/python/pyarmnn/test/test_caffe_parser.py b/python/pyarmnn/test/test_caffe_parser.py
deleted file mode 100644
index d744b907d4..0000000000
--- a/python/pyarmnn/test/test_caffe_parser.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright © 2020 Arm Ltd. All rights reserved.
-# SPDX-License-Identifier: MIT
-import os
-
-import pytest
-import pyarmnn as ann
-import numpy as np
-
-
-@pytest.fixture()
-def parser(shared_data_folder):
- """
- Parse and setup the test network to be used for the tests below
- """
-
- # Create caffe parser
- parser = ann.ICaffeParser()
-
- # Specify path to model
- path_to_model = os.path.join(shared_data_folder, 'mock_model.caffemodel')
-
- # Specify the tensor shape relative to the input [1, 1, 28, 28]
- tensor_shape = {'Placeholder': ann.TensorShape((1, 1, 28, 28))}
-
- # Specify the requested_outputs
- requested_outputs = ["output"]
-
- # Parse caffe binary & create network
- parser.CreateNetworkFromBinaryFile(path_to_model, tensor_shape, requested_outputs)
-
- yield parser
-
-
-def test_caffe_parser_swig_destroy():
- assert ann.ICaffeParser.__swig_destroy__, "There is a swig python destructor defined"
- assert ann.ICaffeParser.__swig_destroy__.__name__ == "delete_ICaffeParser"
-
-
-def test_check_caffe_parser_swig_ownership(parser):
- # Check to see that SWIG has ownership for parser. This instructs SWIG to take
- # ownership of the return value. This allows the value to be automatically
- # garbage-collected when it is no longer in use
- assert parser.thisown
-
-
-def test_get_network_input_binding_info(parser):
- input_binding_info = parser.GetNetworkInputBindingInfo("Placeholder")
-
- tensor = input_binding_info[1]
- assert tensor.GetDataType() == 1
- assert tensor.GetNumDimensions() == 4
- assert tensor.GetNumElements() == 784
-
-
-def test_get_network_output_binding_info(parser):
- output_binding_info1 = parser.GetNetworkOutputBindingInfo("output")
-
- # Check the tensor info retrieved from GetNetworkOutputBindingInfo
- tensor1 = output_binding_info1[1]
-
- assert tensor1.GetDataType() == 1
- assert tensor1.GetNumDimensions() == 2
- assert tensor1.GetNumElements() == 10
-
-
-def test_filenotfound_exception(shared_data_folder):
- parser = ann.ICaffeParser()
-
- # path to model
- path_to_model = os.path.join(shared_data_folder, 'some_unknown_network.caffemodel')
-
- # generic tensor shape [1, 1, 1, 1]
- tensor_shape = {'data': ann.TensorShape((1, 1, 1, 1))}
-
- # requested_outputs
- requested_outputs = [""]
-
- with pytest.raises(RuntimeError) as err:
- parser.CreateNetworkFromBinaryFile(path_to_model, tensor_shape, requested_outputs)
-
- # Only check for part of the exception since the exception returns
- # absolute path which will change on different machines.
- assert 'Failed to open graph file' in str(err.value)
-
-
-def test_caffe_parser_end_to_end(shared_data_folder):
- parser = ann.ICaffeParser = ann.ICaffeParser()
-
- # Load the network specifying the inputs and outputs
- input_name = "Placeholder"
- tensor_shape = {input_name: ann.TensorShape((1, 1, 28, 28))}
- requested_outputs = ["output"]
-
- network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.caffemodel'),
- tensor_shape, requested_outputs)
-
- # Specify preferred backend
- preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
-
- input_binding_info = parser.GetNetworkInputBindingInfo(input_name)
-
- options = ann.CreationOptions()
- runtime = ann.IRuntime(options)
-
- opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
-
- assert 0 == len(messages)
-
- net_id, messages = runtime.LoadNetwork(opt_network)
-
- assert "" == messages
-
- # Load test image data stored in input_caffe.npy
- input_tensor_data = np.load(os.path.join(shared_data_folder, 'caffe_parser/input_caffe.npy')).astype(np.float32)
- input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
-
- # Load output binding info and
- outputs_binding_info = []
- for output_name in requested_outputs:
- outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(output_name))
- output_tensors = ann.make_output_tensors(outputs_binding_info)
-
- runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
-
- output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
-
- # Load golden output file for result comparison.
- expected_output = np.load(os.path.join(shared_data_folder, 'caffe_parser/golden_output_caffe.npy'))
-
- # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
- np.testing.assert_almost_equal(output_vectors[0], expected_output, 4)
diff --git a/python/pyarmnn/test/test_generated.py b/python/pyarmnn/test/test_generated.py
index 0e1d663234..f27d565c2b 100644
--- a/python/pyarmnn/test/test_generated.py
+++ b/python/pyarmnn/test/test_generated.py
@@ -7,7 +7,6 @@ import pytest
import pyarmnn._generated.pyarmnn as generated_armnn
import pyarmnn._generated.pyarmnn as generated_deserializer
-import pyarmnn._generated.pyarmnn_caffeparser as generated_caffe
import pyarmnn._generated.pyarmnn_onnxparser as generated_onnx
import pyarmnn._generated.pyarmnn_tfliteparser as generated_tflite
import pyarmnn._generated.pyarmnn_tfparser as generated_tf
@@ -28,7 +27,6 @@ def get_classes(swig_independent_classes: Tuple):
return list(filter(lambda x: x[0] not in ignored_class_names,
inspect.getmembers(generated_armnn, inspect.isclass) +
inspect.getmembers(generated_deserializer, inspect.isclass) +
- inspect.getmembers(generated_caffe, inspect.isclass) +
inspect.getmembers(generated_tflite, inspect.isclass) +
inspect.getmembers(generated_onnx, inspect.isclass) +
inspect.getmembers(generated_tf, inspect.isclass)))
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
deleted file mode 100644
index 6a744f7f23..0000000000
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ /dev/null
@@ -1,2271 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "CaffeParser.hpp"
-#include "RecordByRecordCaffeParser.hpp"
-
-#include "armnnCaffeParser/Version.hpp"
-
-#include "armnn/Descriptors.hpp"
-#include "armnn/INetwork.hpp"
-#include "armnn/Utils.hpp"
-#include "armnn/Exceptions.hpp"
-
-#include "GraphTopologicalSort.hpp"
-#include "VerificationHelpers.hpp"
-
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/NumericCast.hpp>
-
-#include <fmt/format.h>
-
-// Caffe
-#include "caffe/proto/caffe.pb.h"
-
-// ProtoBuf
-#include <google/protobuf/io/coded_stream.h>
-#include <google/protobuf/io/zero_copy_stream.h>
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/text_format.h>
-#include <google/protobuf/stubs/common.h>
-#include <google/protobuf/stubs/once.h>
-#include <google/protobuf/io/coded_stream.h>
-#include <google/protobuf/descriptor.h>
-#include <google/protobuf/generated_message_reflection.h>
-#include <google/protobuf/reflection_ops.h>
-#include <google/protobuf/wire_format.h>
-
-#include <cmath>
-#include <iostream>
-#include <sstream>
-#include <queue>
-#include <fcntl.h>
-
-/// Caffe networks are loaded from protobuf files (binary or text) using the protobuf library and the generated
-/// code from caffe.pb.h. This gives us a caffe::NetParameter which is an in-memory version of the file.
-/// This contains a flat list of Caffe 'layers' (e.g. convolution, pooling etc.).
-/// Each layer has inputs (called "bottoms") and outputs (called "tops"). Data flows from bottom to top.
-/// The bottoms of a layer refer to the tops of other layers, not their names.
-/// The names of layers seem to be arbitrary (you could rename a layer and the network wouldn't
-/// need any other changes).
-///
-/// Some layers (e.g. Relu) can be configured so that their top and bottom are both the same. This is called an
-/// "in-place" layer and is a Caffe runtime feature used to reduce memory usage by modifying tensors in-place.
-/// This isn't relevant to the parser and so we preprocess these layers to convert them to regular layers, to result
-/// in a consistent graph structure.
-
-namespace armnnCaffeParser
-{
-
-using namespace armnn;
-using namespace caffe;
-using namespace std;
-using namespace google::protobuf::io;
-
-ICaffeParser::ICaffeParser() : pCaffeParserImpl(new RecordByRecordCaffeParser()) {}
-
-ICaffeParser::~ICaffeParser() = default;
-
-ICaffeParser* ICaffeParser::CreateRaw()
-{
- return new ICaffeParser();
-}
-
-ICaffeParserPtr ICaffeParser::Create()
-{
- return ICaffeParserPtr(CreateRaw(), &ICaffeParser::Destroy);
-}
-
-void ICaffeParser::Destroy(ICaffeParser* parser)
-{
- delete parser;
-}
-
-armnn::INetworkPtr ICaffeParser::CreateNetworkFromTextFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- return pCaffeParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes, requestedOutputs);
-}
-
-armnn::INetworkPtr ICaffeParser::CreateNetworkFromBinaryFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- return pCaffeParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes,requestedOutputs);
-}
-
-armnn::INetworkPtr ICaffeParser::CreateNetworkFromString(
- const char* protoText,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- return pCaffeParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
-}
-
-BindingPointInfo ICaffeParser::GetNetworkInputBindingInfo(const std::string& name) const
-{
- return pCaffeParserImpl->GetNetworkInputBindingInfo(name);
-}
-
-BindingPointInfo ICaffeParser::GetNetworkOutputBindingInfo(const std::string& name) const
-{
- return pCaffeParserImpl->GetNetworkOutputBindingInfo(name);
-}
-
-namespace
-{
-
-const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int blobIndex)
-{
- auto nBlobs = layerParam.blobs_size();
- if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
- {
- throw ParseException(
- fmt::format("Expected data blob at index {} in layer {} not found. nBlobs={}. {}",
- blobIndex,
- layerParam.name(),
- nBlobs,
- CHECK_LOCATION().AsString()));
- }
-
- const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
-
- const float* arrayPtr = blob.data().data();
- return arrayPtr;
-}
-
-void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex)
-{
- auto nBlobs = layerParam.blobs_size();
- if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
- {
- throw ParseException(
- fmt::format("Expected data blob at index {} in layer {} not found. {}",
- blobIndex,
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
-
- const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));
-
- size_t blobSize = armnn::numeric_cast<size_t>(blob.data_size());
- if (blobSize != outData.size())
- {
- throw ParseException(
- fmt::format("Data blob at index {} in layer {} has an unexpected size. "
- "Expected {} elements but got {} elements. {}",
- blobIndex,
- layerParam.name(),
- outData.size(),
- blobSize,
- CHECK_LOCATION().AsString()));
- }
-
- int outSizeInt = armnn::numeric_cast<int>(outData.size());
- for (int i = 0; i < outSizeInt; ++i)
- {
- outData[static_cast<size_t>(i)] = blob.data(i);
- }
-}
-
-template <typename T>
-size_t SizeOfVectorData(const vector<T>& vec)
-{
- return vec.size() * sizeof(T);
-}
-
-void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
- unsigned int numInputs,
- unsigned int numOutputs)
-{
- int numInputsActual = layerParameter.bottom_size();
- if (numInputs != armnn::numeric_cast<unsigned int>(numInputsActual))
- {
- throw ParseException(
- fmt::format("Invalid number of inputs requested {} for layer {} "
- "while only {} present. {}",
- numInputs,
- layerParameter.name(),
- numInputsActual,
- CHECK_LOCATION().AsString()));
- }
-
- int numOutputsActual = layerParameter.top_size();
- if (numOutputs != armnn::numeric_cast<unsigned int>(numOutputsActual))
- {
- throw ParseException(
- fmt::format("Invalid number of outputs requested {} for layer {} "
- "while only {} present. {}",
- numOutputs,
- layerParameter.name(),
- numOutputsActual,
- CHECK_LOCATION().AsString()));
- }
-}
-
-template <typename ParamType, typename ExtractOptional, typename ExtractFallback, typename ValueType>
-ValueType GetOptionalWithFallback(const ParamType& param,
- ExtractOptional extractOptional,
- ExtractFallback extractFallback,
- ValueType defaultValue)
-{
- auto optValue = extractOptional(param, defaultValue);
- if (optValue.first)
- {
- return optValue.second;
- }
- auto fallbackValue = extractFallback(param, defaultValue);
- return fallbackValue.second;
-}
-
-#define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, \
- PARAM_TYPE, \
- OPTIONAL_VALUE, \
- FALLBACK_VECTOR, \
- VALUE_TYPE, \
- DEFAULT_VALUE) \
- GetOptionalWithFallback( \
- PARAM, \
- [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
- { \
- if (param.has_##OPTIONAL_VALUE ()) \
- { \
- return std::make_pair(true, param.OPTIONAL_VALUE ()); \
- } \
- else \
- { \
- return std::make_pair(false, defaultValue); \
- } \
- }, \
- [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
- { \
- if (param.FALLBACK_VECTOR##_size() > 0) \
- { \
- return std::make_pair(true, (param.FALLBACK_VECTOR ()).Get(0)); \
- } \
- else \
- { \
- return std::make_pair(false, defaultValue); \
- } \
- }, \
- DEFAULT_VALUE)
-
-#define GET_OPTIONAL_WITH_FALLBACK(PARAM, \
- PARAM_TYPE, \
- OPTIONAL_VALUE, \
- FALLBACK_VALUE, \
- VALUE_TYPE, \
- DEFAULT_VALUE) \
- GetOptionalWithFallback( \
- PARAM, \
- [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
- { \
- if (param.has_##OPTIONAL_VALUE ()) \
- { \
- return std::make_pair(true, param.OPTIONAL_VALUE ()); \
- } \
- else \
- { \
- return std::make_pair(false, defaultValue); \
- } \
- }, \
- [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \
- { \
- if (param.has_##FALLBACK_VALUE ()) \
- { \
- return std::make_pair(true, param.FALLBACK_VALUE ()); \
- } \
- else \
- { \
- return std::make_pair(false, defaultValue); \
- } \
- }, \
- DEFAULT_VALUE)
-
-} // namespace <anonymous>
-
-const std::map<std::string, ICaffeParser::CaffeParserImpl::OperationParsingFunction>
- ICaffeParser::CaffeParserImpl::ms_CaffeLayerNameToParsingFunctions = {
- { "Input", &CaffeParserImpl::ParseInputLayer },
- { "Convolution", &CaffeParserImpl::ParseConvLayer },
- { "Deconvolution",&CaffeParserImpl::ParseDeconvLayer },
- { "Pooling", &CaffeParserImpl::ParsePoolingLayer },
- { "ReLU", &CaffeParserImpl::ParseReluLayer },
- { "LRN", &CaffeParserImpl::ParseLRNLayer },
- { "InnerProduct", &CaffeParserImpl::ParseInnerProductLayer },
- { "Softmax", &CaffeParserImpl::ParseSoftmaxLayer },
- { "Eltwise", &CaffeParserImpl::ParseEltwiseLayer },
- { "Concat", &CaffeParserImpl::ParseConcatLayer },
- { "BatchNorm", &CaffeParserImpl::ParseBatchNormLayer },
- { "Scale", &CaffeParserImpl::ParseScaleLayer },
- { "Split", &CaffeParserImpl::ParseSplitLayer },
- { "Dropout", &CaffeParserImpl::ParseDropoutLayer},
- { "ArgMax", &CaffeParserImpl::ParseArgmaxLayer},
-};
-
-ICaffeParser::CaffeParserImpl::CaffeParserImpl()
- : m_Network(nullptr, nullptr)
-{
-
-}
-
-CaffeParser::CaffeParser()
-: CaffeParserImpl()
-{
-
-}
-
-BindingPointInfo ICaffeParser::CaffeParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
-{
- return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
-}
-
-BindingPointInfo ICaffeParser::CaffeParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
-{
- return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
-}
-
-std::pair<armnn::LayerBindingId, armnn::TensorInfo> ICaffeParser::CaffeParserImpl::GetBindingInfo(
- const std::string& layerName,
- const char* bindingPointDesc,
- const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
-{
- auto it = nameToBindingInfo.find(layerName);
- if (it == nameToBindingInfo.end())
- {
- throw InvalidArgumentException(
- fmt::format("Unknown binding {} for layer '{}'. {}",
- bindingPointDesc,
- layerName,
- CHECK_LOCATION().AsString()));
- }
- return it->second;
-}
-
-TensorInfo ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const
-{
- std::vector<unsigned int> shape;
- for (int j = 0; j < blobShape.dim_size(); ++j)
- {
- shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
- }
-
- return TensorInfo(armnn::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
-}
-
-BlobShape TensorDescToBlobShape(const TensorInfo& desc)
-{
- BlobShape ret;
- for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i)
- {
- ret.add_dim(i);
- ret.set_dim(armnn::numeric_cast<int>(i), desc.GetShape()[i]);
- }
-
- return ret;
-}
-
-// Note: can move to CaffeParser when/if we optimise the text/string format
-// to load on a layer by layer basis
-vector<const LayerParameter*> ICaffeParser::CaffeParserImpl::GetInputs(const LayerParameter& layerParam)
-{
- std::vector<const caffe::LayerParameter*> ret;
- ret.reserve(armnn::numeric_cast<size_t>(layerParam.bottom_size()));
- for (int j = 0; j < layerParam.bottom_size(); ++j)
- {
- std::string inputName = layerParam.bottom(j);
- auto inputIt = m_CaffeLayersByTopName.find(inputName);
- if (inputIt == m_CaffeLayersByTopName.end())
- {
- throw ParseException(
- fmt::format("Can't find Caffe layer with top called '{}', "
- "which is listed as an input of '{}'. {}",
- inputName,
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- ret.push_back(inputIt->second);
- }
-
- return ret;
-}
-
-void ICaffeParser::CaffeParserImpl::ParseInputLayer(const LayerParameter& layerParam)
-{
- ARMNN_ASSERT(layerParam.type() == "Input");
- ValidateNumInputsOutputs(layerParam, 0, 1);
-
- const InputParameter& param = layerParam.input_param();
-
- const armnn::LayerBindingId inputId = armnn::numeric_cast<armnn::LayerBindingId>(
- m_NetworkInputsBindingInfo.size());
- armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str());
-
- // Decides the tensor info for this input. This can be specified in the Caffe network but can also
- // be overriden by user input (m_inputShapes).
- armnn::TensorInfo inputTensorInfo;
-
- const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ?
- &param.shape(0) : nullptr;
- if (originalShape)
- {
- inputTensorInfo = BlobShapeToTensorInfo(*originalShape);
- }
-
- auto overrideIt = m_InputShapes.find(layerParam.name());
- if (overrideIt != m_InputShapes.end())
- {
- const TensorShape& overrideShape = overrideIt->second;
- if (originalShape &&
- ( originalShape->dim(1) != overrideShape[1]
- || originalShape->dim(2) != overrideShape[2]
- || originalShape->dim(3) != overrideShape[3]))
- {
- throw ParseException(
- fmt::format("Parsed input shape for '{}' is incompatible with the override provided. {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- inputTensorInfo.SetShape(overrideShape);
- }
- else if (!originalShape)
- {
- throw ParseException(
- fmt::format("No input descriptor given for '{}' and no input shape found in caffe model. {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- TrackInputBinding(inputLayer, inputId, inputTensorInfo);
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), inputLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(const caffe::LayerParameter& layerParam,
- const armnn::Convolution2dDescriptor& desc,
- unsigned int kernelW,
- unsigned int kernelH)
-{
- ARMNN_ASSERT(layerParam.type() == "Convolution");
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- ConvolutionParameter convParam = layerParam.convolution_param();
- BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
- const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
-
- // asusme these were already verified by the caller ParseConvLayer() function
- ARMNN_ASSERT(numGroups < inputShape.dim(1));
- ARMNN_ASSERT(numGroups > 1);
-
- // Handle grouping
- armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
-
- vector<string> convLayerNames(numGroups);
- vector<armnn::IConnectableLayer*> convLayers(numGroups);
- convLayerNames[0] = layerParam.name();
-
- // This convolution is to be applied to chunks of the input data so add a splitter layer
-
- // Redirect the convolution input to the splitter
- unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)),
- static_cast<unsigned int>(inputShape.dim(1)),
- static_cast<unsigned int>(inputShape.dim(2)),
- static_cast<unsigned int>(inputShape.dim(3))};
-
- // Split dimension 1 of the splitter output shape and conv input shapes
- // according to the number of groups
-
- splitterDimSizes[1] /= numGroups;
- inputShape.set_dim(1, splitterDimSizes[1]);
-
- // This is used to describe how the input is to be split
- ViewsDescriptor splitterDesc(numGroups);
-
- // Create an output node for each group, giving each a unique name
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- // Work out the names of the splitter layers child convolutions
- stringstream ss;
- ss << layerParam.name() << "_" << g;
- convLayerNames[g] = ss.str();
-
- splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g);
-
- // Set the size of the views.
- for (unsigned int dimIdx=0; dimIdx < 4; dimIdx++)
- {
- splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
- }
- }
-
- const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
- armnn::IConnectableLayer* splitterLayer = m_Network->AddSplitterLayer(splitterDesc, splitterLayerName.c_str());
-
- inputConnection.Connect(splitterLayer->GetInputSlot(0));
- for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++)
- {
- splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape));
- }
-
- unsigned int numFilters = convParam.num_output();
-
- // Populates convolution output tensor descriptor dimensions.
- BlobShape outputShape;
- outputShape.add_dim(0);
- outputShape.set_dim(0, inputShape.dim(0));
- outputShape.add_dim(1);
- // Ensures that dimension 1 of the convolution output is split according to the number of groups.
- outputShape.set_dim(1, numFilters / numGroups);
- outputShape.add_dim(2);
- outputShape.set_dim(
- 2, (static_cast<int>(
- static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - (desc.m_DilationX * (kernelH - 1) + 1)) /
- static_cast<float>(desc.m_StrideY)) + 1));
- outputShape.add_dim(3);
- outputShape.set_dim(
- 3, (static_cast<int>(
- static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - (desc.m_DilationY * (kernelW - 1) + 1)) /
- static_cast<float>(desc.m_StrideX)) + 1));
-
- // Load the weight data for ALL groups
- vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
- inputShape.dim(1) * // number of input channels
- outputShape.dim(1) * // number of output channels
- kernelH *
- kernelW));
- GetDataFromBlob(layerParam, weightData, 0);
-
- const unsigned int weightDimSizes[4] = {
- static_cast<unsigned int>(outputShape.dim(1)),
- static_cast<unsigned int>(inputShape.dim(1)),
- kernelH,
- kernelW};
-
- TensorInfo biasInfo;
- vector<float> biasData;
-
- if (desc.m_BiasEnabled)
- {
- biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
- GetDataFromBlob(layerParam, biasData, 1);
-
- const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
- biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
- }
-
- const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
- const unsigned int numBiasesPerGroup = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;
-
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- // Sets the slot index, group 0 should be connected to the 0th output of the splitter
- // group 1 should be connected to the 1st output of the splitter.
-
- // Pulls out the weights for this group from that loaded from the model file earlier.
- ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32),
- weightData.data() + numWeightsPerGroup * g);
-
- IConnectableLayer* convLayer = nullptr;
- Optional<ConstTensor> optionalBiases;
- if (desc.m_BiasEnabled)
- {
- // Pulls out the biases for this group from that loaded from the model file earlier.
- ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
- optionalBiases = Optional<ConstTensor>(biases);
- }
- convLayer = m_Network->AddConvolution2dLayer(desc,
- weights,
- optionalBiases,
- convLayerNames[g].c_str());
- convLayers[g] = convLayer;
-
- // If we have more than one group then the input to the nth convolution the splitter layer's nth output,
- // otherwise it's the regular input to this layer.
- armnn::IOutputSlot& splitterInputConnection =
- splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
- splitterInputConnection.Connect(convLayer->GetInputSlot(0));
- convLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
- }
-
- // If the convolution was performed in chunks, add a layer to concatenate the results
-
- // The merge input shape matches that of the convolution output
- unsigned int concatDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)),
- static_cast<unsigned int>(outputShape.dim(1)),
- static_cast<unsigned int>(outputShape.dim(2)),
- static_cast<unsigned int>(outputShape.dim(3))};
-
- // This is used to describe how the input is to be concatenated
- OriginsDescriptor concatDesc(numGroups);
-
- // Now create an input node for each group, using the name from
- // the output of the corresponding convolution
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- concatDesc.SetViewOriginCoord(g, 1, concatDimSizes[1] * g);
- }
-
- // Make sure the output from the concat is the correct size to hold the data for all groups
- concatDimSizes[1] *= numGroups;
- outputShape.set_dim(1, concatDimSizes[1]);
-
- // Finally add the concat layer
- IConnectableLayer* concatLayer = m_Network->AddConcatLayer(concatDesc, layerParam.name().c_str());
-
- if (!concatLayer)
- {
- throw ParseException(
- fmt::format("Failed to create final concat layer for Split+Convolution+Concat. "
- "Layer={} #groups={} #filters={} {}",
- layerParam.name(),
- numGroups,
- numFilters,
- CHECK_LOCATION().AsString()));
- }
-
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
- }
- concatLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, concatDimSizes, DataType::Float32));
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(const caffe::LayerParameter& layerParam,
- const armnn::TransposeConvolution2dDescriptor& desc,
- unsigned int kernelW,
- unsigned int kernelH)
-{
- ARMNN_ASSERT(layerParam.type() == "Deconvolution");
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- ConvolutionParameter convParam = layerParam.convolution_param();
- BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
- const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
-
- // asusme these were already verified by the caller ParseDeconvLayer() function
- ARMNN_ASSERT(numGroups <= inputShape.dim(1));
- ARMNN_ASSERT(numGroups > 1);
-
- // Handle grouping
- armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
-
- vector<string> convLayerNames(numGroups);
- vector<armnn::IConnectableLayer*> convLayers(numGroups);
- convLayerNames[0] = layerParam.name();
-
- // This deconvolution is to be applied to chunks of the input data so add a splitter layer
-
- // Redirect the deconvolution input to the splitter
- unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)),
- static_cast<unsigned int>(inputShape.dim(1)),
- static_cast<unsigned int>(inputShape.dim(2)),
- static_cast<unsigned int>(inputShape.dim(3))};
-
- // Split dimension 1 of the splitter output shape and deconv input shapes
- // according to the number of groups
-
- splitterDimSizes[1] /= numGroups;
- inputShape.set_dim(1, splitterDimSizes[1]);
-
- // This is used to describe how the input is to be split
- ViewsDescriptor splitterDesc(numGroups);
-
- // Create an output node for each group, giving each a unique name
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- // Work out the names of the splitter layers child deconvolutions
- stringstream ss;
- ss << layerParam.name() << "_" << g;
- convLayerNames[g] = ss.str();
-
- splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g);
-
- // Set the size of the views.
- for (unsigned int dimIdx=0; dimIdx < 4; dimIdx++)
- {
- splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
- }
- }
-
- const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
- armnn::IConnectableLayer* splitterLayer = m_Network->AddSplitterLayer(splitterDesc, splitterLayerName.c_str());
-
- inputConnection.Connect(splitterLayer->GetInputSlot(0));
- for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++)
- {
- splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape));
- }
-
- unsigned int numFilters = convParam.num_output();
-
- // Populates deconvolution output tensor descriptor dimensions.
- BlobShape outputShape;
- outputShape.add_dim(0);
- outputShape.set_dim(0, inputShape.dim(0));
- outputShape.add_dim(1);
- // Ensures that dimension 1 of the deconvolution output is split according to the number of groups.
- outputShape.set_dim(1, numFilters / numGroups);
- outputShape.add_dim(2);
- outputShape.set_dim(
- 2, (static_cast<int>(
- desc.m_StrideY * (inputShape.dim(2) - 1) - 2 * desc.m_PadBottom + kernelH)));
- outputShape.add_dim(3);
- outputShape.set_dim(
- 3, (static_cast<int>(
- desc.m_StrideX * (inputShape.dim(3) - 1) - 2 * desc.m_PadRight + kernelW)));
-
- // Load the weight data for ALL groups
- vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
- inputShape.dim(1) * // number of input channels
- outputShape.dim(1) * // number of output channels
- kernelH *
- kernelW));
- GetDataFromBlob(layerParam, weightData, 0);
-
- const unsigned int weightDimSizes[4] = {
- static_cast<unsigned int>(outputShape.dim(1)),
- static_cast<unsigned int>(inputShape.dim(1)),
- kernelH,
- kernelW};
-
- TensorInfo biasInfo;
- vector<float> biasData;
-
- if (desc.m_BiasEnabled)
- {
- biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
- GetDataFromBlob(layerParam, biasData, 1);
-
- const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
- biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
- }
-
- const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
- const unsigned int numBiasesPerGroup = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;
-
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- // Sets the slot index, group 0 should be connected to the 0th output of the splitter
- // group 1 should be connected to the 1st output of the splitter.
-
- // Pulls out the weights for this group from that loaded from the model file earlier.
- ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32),
- weightData.data() + numWeightsPerGroup * g);
-
- IConnectableLayer* deconvLayer = nullptr;
- Optional<ConstTensor> optionalBiases;
- if (desc.m_BiasEnabled)
- {
- // Pulls out the biases for this group from that loaded from the model file earlier.
- ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
- optionalBiases = Optional<ConstTensor>(biases);
- }
- deconvLayer = m_Network->AddTransposeConvolution2dLayer(desc,
- weights,
- optionalBiases,
- convLayerNames[g].c_str());
- convLayers[g] = deconvLayer;
-
- // If we have more than one group then the input to the nth deconvolution the splitter layer's nth output,
- // otherwise it's the regular input to this layer.
- armnn::IOutputSlot& splitterInputConnection =
- splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
- splitterInputConnection.Connect(deconvLayer->GetInputSlot(0));
- deconvLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
- }
-
- // If the deconvolution was performed in chunks, add a layer to concatenate the results
-
- // The merge input shape matches that of the deconvolution output
- unsigned int concatDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)),
- static_cast<unsigned int>(outputShape.dim(1)),
- static_cast<unsigned int>(outputShape.dim(2)),
- static_cast<unsigned int>(outputShape.dim(3))};
-
- // This is used to describe how the input is to be concatenated
- OriginsDescriptor concatDesc(numGroups);
-
- // Now create an input node for each group, using the name from
- // the output of the corresponding deconvolution
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- concatDesc.SetViewOriginCoord(g, 1, concatDimSizes[1] * g);
- }
-
- // Make sure the output from the concat is the correct size to hold the data for all groups
- concatDimSizes[1] *= numGroups;
- outputShape.set_dim(1, concatDimSizes[1]);
-
- // Finally add the concat layer
- IConnectableLayer* concatLayer = m_Network->AddConcatLayer(concatDesc, layerParam.name().c_str());
-
- if (!concatLayer)
- {
- throw ParseException(
- fmt::format("Failed to create final concat layer for Split+Deconvolution+Concat. "
- "Layer={} #groups={} #filters={} {}",
- layerParam.name(),
- numGroups,
- numFilters,
- CHECK_LOCATION().AsString()));
- }
-
- for (unsigned int g = 0; g < numGroups; ++g)
- {
- convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
- }
- concatLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, concatDimSizes, DataType::Float32));
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam,
- const armnn::Convolution2dDescriptor& convDesc,
- unsigned int kernelW,
- unsigned int kernelH)
-{
- ARMNN_ASSERT(layerParam.type() == "Convolution");
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- ConvolutionParameter convParam = layerParam.convolution_param();
- BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
-
- DepthwiseConvolution2dDescriptor desc;
- desc.m_PadLeft = convDesc.m_PadLeft;
- desc.m_PadRight = convDesc.m_PadRight;
- desc.m_PadTop = convDesc.m_PadTop;
- desc.m_PadBottom = convDesc.m_PadBottom;
- desc.m_StrideX = convDesc.m_StrideX;
- desc.m_StrideY = convDesc.m_StrideY;
- desc.m_DilationX = convDesc.m_DilationX;
- desc.m_DilationY = convDesc.m_DilationY;
- desc.m_BiasEnabled = convDesc.m_BiasEnabled;
-
- unsigned int numFilters = convParam.num_output();
-
- BlobShape outputShape;
- outputShape.add_dim(0);
- outputShape.set_dim(0, inputShape.dim(0));
- outputShape.add_dim(1);
- outputShape.set_dim(1, numFilters);
- outputShape.add_dim(2);
- outputShape.set_dim(
- 2, (static_cast<int>(
- static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - (desc.m_DilationX * (kernelH - 1) + 1)) /
- static_cast<float>(desc.m_StrideY)) + 1));
- outputShape.add_dim(3);
- outputShape.set_dim(
- 3, (static_cast<int>(
- static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - (desc.m_DilationY * (kernelW - 1) + 1)) /
- static_cast<float>(desc.m_StrideX)) + 1));
-
- // Load the weight data
- size_t allWeightsSize = armnn::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
- vector<float> weightData(allWeightsSize);
-
- GetDataFromBlob(layerParam, weightData, 0);
-
- // depth multiplier will be 1 for the depthwise convolution
- const unsigned int weightDimSizes[4] = {
- static_cast<unsigned int>(1), // depth multiplier
- static_cast<unsigned int>(inputShape.dim(1)), // #channels
- kernelH,
- kernelW};
-
- armnn::IConnectableLayer* returnLayer = nullptr;
- ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
- Optional<ConstTensor> optionalBiases;
- vector<float> biasData;
- if (desc.m_BiasEnabled)
- {
- TensorInfo biasInfo;
-
- biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
- GetDataFromBlob(layerParam, biasData, 1);
-
- const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
- biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
-
- ConstTensor biases(biasInfo, biasData.data());
- optionalBiases = Optional<ConstTensor>(biases);
- }
- returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc,
- weights,
- optionalBiases,
- layerParam.name().c_str());
-
- if (!returnLayer)
- {
- throw ParseException(
- fmt::format("Failed to create depthwise convolution layer. "
- "Layer={} #filters={} {}",
- layerParam.name(),
- numFilters,
- CHECK_LOCATION().AsString()));
- }
- armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
- inputConnection.Connect(returnLayer->GetInputSlot(0));
- returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseConvLayer(const LayerParameter& layerParam)
-{
- // Ignored Caffe Parameters
- // * Weight Filler
- // * Bias Filler
- // * Engine
- // * Force nd_im2col
- // * Axis
-
- // Not Available ArmNN Interface Parameters
- // * Rounding policy;
-
- ARMNN_ASSERT(layerParam.type() == "Convolution");
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- ConvolutionParameter convParam = layerParam.convolution_param();
- BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
- const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
- unsigned int numFilters = convParam.num_output();
-
- const auto notFound = std::numeric_limits<unsigned int>::max();
-
- unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- kernel_h, kernel_size, unsigned int, notFound);
- unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- kernel_w, kernel_size, unsigned int, notFound);
-
- unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- stride_h, stride, unsigned int, 1u);
- unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- stride_w, stride, unsigned int, 1u);
-
- unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- pad_h, pad, unsigned int, 0u);
- unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- pad_w, pad, unsigned int, 0u);
-
- unsigned int dilationH = convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
- unsigned int dilationW = convParam.dilation_size() > 1 ? convParam.dilation(1) :
- convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
-
- Convolution2dDescriptor convolution2dDescriptor;
- convolution2dDescriptor.m_PadLeft = padW;
- convolution2dDescriptor.m_PadRight = padW;
- convolution2dDescriptor.m_PadTop = padH;
- convolution2dDescriptor.m_PadBottom = padH;
- convolution2dDescriptor.m_StrideX = strideW;
- convolution2dDescriptor.m_StrideY = strideH;
- convolution2dDescriptor.m_DilationX = dilationW;
- convolution2dDescriptor.m_DilationY = dilationH;
- convolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;
-
- if (numGroups > numFilters)
- {
- throw ParseException(
- fmt::format("Error parsing Convolution: {}. "
- "The 'group'={} parameter cannot be larger than the "
- "number of filters supplied ='{}'. {}",
- layerParam.name(),
- numGroups,
- numFilters,
- CHECK_LOCATION().AsString()));
- }
-
- if (inputShape.dim_size() != 4)
- {
- throw ParseException(
- fmt::format("Convolution input shape is expected to have 4 dimensions. "
- "{}'s input has only {}. {}",
- layerParam.name(),
- inputShape.dim_size(),
- CHECK_LOCATION().AsString()));
- }
-
- if (numGroups > 1)
- {
- if (numGroups > inputShape.dim(1))
- {
- throw ParseException(
- fmt::format("Error parsing Convolution: {}. "
- "The 'group'={} parameter cannot be larger than the "
- "channel of the input shape={} (in NCHW format). {}",
- layerParam.name(),
- numGroups,
- inputShape.dim(1),
- CHECK_LOCATION().AsString()));
- }
- else if (numGroups == inputShape.dim(1))
- {
- // we use a depthwise convolution here, because the number of groups equals to the
- // input channels
- AddConvLayerWithDepthwiseConv(layerParam, convolution2dDescriptor, kernelW, kernelH);
- return;
- }
- else
- {
- // we split the input by channels into channels/groups separate convolutions
- // and concatenate the results afterwards
- AddConvLayerWithSplits(layerParam, convolution2dDescriptor, kernelW, kernelH);
- return;
- }
- }
-
- // NOTE: at this point we only need to handle #group=1 case, all other cases should be
- // handled by the AddConvLayer* helpers
-
- // Populate convolution output tensor descriptor dimensions
- BlobShape outputShape;
- outputShape.add_dim(0);
- outputShape.set_dim(0, inputShape.dim(0));
- outputShape.add_dim(1);
- outputShape.set_dim(1, numFilters);
- outputShape.add_dim(2);
- outputShape.set_dim(
- 2, (static_cast<int>(
- static_cast<float>(inputShape.dim(2) + 2 * padH - (dilationH * (kernelH - 1) + 1)) /
- static_cast<float>(strideH)) + 1));
- outputShape.add_dim(3);
- outputShape.set_dim(
- 3, (static_cast<int>(
- static_cast<float>(inputShape.dim(3) + 2 * padW - (dilationW * (kernelW - 1) + 1)) /
- static_cast<float>(strideW)) + 1));
-
- // Load the weight data for ALL groups
- vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
- outputShape.dim(1) *
- kernelH *
- kernelW));
- GetDataFromBlob(layerParam, weightData, 0);
-
- const unsigned int weightDimSizes[4] = {
- static_cast<unsigned int>(outputShape.dim(1)), // output channels
- static_cast<unsigned int>(inputShape.dim(1)), // input channels
- kernelH,
- kernelW};
-
- armnn::IConnectableLayer* returnLayer = nullptr;
-
- // Pull out the weights for this group from that loaded from the model file earlier
- ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
- Optional<ConstTensor> optionalBiases;
- vector<float> biasData;
- if (convolution2dDescriptor.m_BiasEnabled)
- {
- TensorInfo biasInfo;
-
- biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
- GetDataFromBlob(layerParam, biasData, 1);
-
- const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
- biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
-
- // Pull out the biases for this group from that loaded from the model file earlier
- ConstTensor biases(biasInfo, biasData.data());
- optionalBiases = Optional<ConstTensor>(biases);
- }
- returnLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
- weights,
- optionalBiases,
- layerParam.name().c_str());
-
- armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
- inputConnection.Connect(returnLayer->GetInputSlot(0));
- returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
-
- if (!returnLayer)
- {
- throw ParseException(
- fmt::format("Failed to create Convolution layer. "
- "Layer={} #groups={} #filters={} {}",
- layerParam.name(),
- numGroups,
- numFilters,
- CHECK_LOCATION().AsString()));
- }
-
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseDeconvLayer(const LayerParameter& layerParam)
-{
- // Ignored Caffe Parameters
- // * Weight Filler
- // * Bias Filler
- // * Engine
- // * Force nd_im2col
- // * Axis
-
- // Not Available ArmNN Interface Parameters
- // * Rounding policy;
-
- ARMNN_ASSERT(layerParam.type() == "Deconvolution");
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- ConvolutionParameter convParam = layerParam.convolution_param();
- BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
- const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
- unsigned int numFilters = convParam.num_output();
-
- const auto notFound = std::numeric_limits<unsigned int>::max();
-
- unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- kernel_h, kernel_size, unsigned int, notFound);
- unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- kernel_w, kernel_size, unsigned int, notFound);
-
- unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- stride_h, stride, unsigned int, 1u);
- unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- stride_w, stride, unsigned int, 1u);
-
- unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- pad_h, pad, unsigned int, 0u);
- unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
- pad_w, pad, unsigned int, 0u);
-
- unsigned int dilationH = convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
- unsigned int dilationW = convParam.dilation_size() > 1 ? convParam.dilation(1) :
- convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
-
- if (dilationH != 1 || dilationW != 1) {
- fmt::format("Dilated decnvolution is not supported. "
- "{}'s input has dilation {} {}. {}",
- layerParam.name(),
- dilationW, dilationH,
- CHECK_LOCATION().AsString());
- }
-
- TransposeConvolution2dDescriptor deconvolution2dDescriptor;
- deconvolution2dDescriptor.m_PadLeft = padW;
- deconvolution2dDescriptor.m_PadRight = padW;
- deconvolution2dDescriptor.m_PadTop = padH;
- deconvolution2dDescriptor.m_PadBottom = padH;
- deconvolution2dDescriptor.m_StrideX = strideW;
- deconvolution2dDescriptor.m_StrideY = strideH;
- deconvolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;
-
- if (numGroups > numFilters)
- {
- throw ParseException(
- fmt::format("Error parsing Deconvolution: {}. "
- "The 'group'={} parameter cannot be larger than the "
- "number of filters supplied ='{}'. {}",
- layerParam.name(),
- numGroups,
- numFilters,
- CHECK_LOCATION().AsString()));
- }
-
- if (inputShape.dim_size() != 4)
- {
- throw ParseException(
- fmt::format("Deconvolution input shape is expected to have 4 dimensions. "
- "{}'s input has only {}. {}",
- layerParam.name(),
- inputShape.dim_size(),
- CHECK_LOCATION().AsString()));
- }
-
- if (numGroups > 1)
- {
- if (numGroups > inputShape.dim(1))
- {
- throw ParseException(
- fmt::format("Error parsing Deconvolution: {}. "
- "The 'group'={} parameter cannot be larger than the "
- "channel of the input shape={} (in NCHW format). {}",
- layerParam.name(),
- numGroups,
- inputShape.dim(1),
- CHECK_LOCATION().AsString()));
- }
- else
- {
- // we split the input by channels into channels/groups separate convolutions
- // and concatenate the results afterwards
- AddDeconvLayerWithSplits(layerParam, deconvolution2dDescriptor, kernelW, kernelH);
- return;
- }
- }
-
- // NOTE: at this point we only need to handle #group=1 case, all other cases should be
- // handled by the AddDeconvLayer* helpers
-
- // Populate deconvolution output tensor descriptor dimensions
- BlobShape outputShape;
- outputShape.add_dim(0);
- outputShape.set_dim(0, inputShape.dim(0));
- outputShape.add_dim(1);
- outputShape.set_dim(1, numFilters);
- outputShape.add_dim(2);
- outputShape.set_dim(
- 2, (static_cast<int>(
- strideH * (inputShape.dim(2) - 1) - 2 * padH + (dilationH * (kernelH - 1) + 1))));
- outputShape.add_dim(3);
- outputShape.set_dim(
- 3, (static_cast<int>(
- strideW * (inputShape.dim(3) - 1) - 2 * padW + (dilationW * (kernelW - 1) + 1))));
-
- // Load the weight data for ALL groups
- vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
- outputShape.dim(1) *
- kernelH *
- kernelW));
- GetDataFromBlob(layerParam, weightData, 0);
-
- const unsigned int weightDimSizes[4] = {
- static_cast<unsigned int>(outputShape.dim(1)), // output channels
- static_cast<unsigned int>(inputShape.dim(1)), // input channels
- kernelH,
- kernelW};
-
- armnn::IConnectableLayer* returnLayer = nullptr;
-
- // Pull out the weights for this group from that loaded from the model file earlier
- ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), weightData.data());
- Optional<ConstTensor> optionalBiases;
- vector<float> biasData;
- if (deconvolution2dDescriptor.m_BiasEnabled)
- {
- TensorInfo biasInfo;
-
- biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
- GetDataFromBlob(layerParam, biasData, 1);
-
- const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
- biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
-
- // Pull out the biases for this group from that loaded from the model file earlier
- ConstTensor biases(biasInfo, biasData.data());
- optionalBiases = Optional<ConstTensor>(biases);
- }
- returnLayer = m_Network->AddTransposeConvolution2dLayer(deconvolution2dDescriptor,
- weights,
- optionalBiases,
- layerParam.name().c_str());
-
- armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
- inputConnection.Connect(returnLayer->GetInputSlot(0));
- returnLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
-
- if (!returnLayer)
- {
- throw ParseException(
- fmt::format("Failed to create Deconvolution layer. "
- "Layer={} #groups={} #filters={} {}",
- layerParam.name(),
- numGroups,
- numFilters,
- CHECK_LOCATION().AsString()));
- }
-
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParsePoolingLayer(const LayerParameter& layerParam)
-{
- // Ignored Caffe Parameters
- // Stochastic Pooling
- // Engine
-
- ValidateNumInputsOutputs(layerParam, 1, 1);
- PoolingParameter param = layerParam.pooling_param();
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- const auto notFound = std::numeric_limits<unsigned int>::max();
-
- unsigned int kernel_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
- kernel_h, kernel_size, unsigned int, notFound);
- unsigned int kernel_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
- kernel_w, kernel_size, unsigned int, notFound);
-
- if ((kernel_h == notFound || kernel_w == notFound) && param.has_global_pooling())
- {
- kernel_h = inputInfo.GetShape()[2];
- kernel_w = inputInfo.GetShape()[3];
- }
-
- unsigned int stride_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
- stride_h, stride, unsigned int, notFound);
- unsigned int stride_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
- stride_h, stride, unsigned int, notFound);
-
- if ((stride_h == notFound || stride_w == notFound) && param.has_global_pooling())
- {
- stride_h = 1;
- stride_w = 1;
- }
-
- unsigned int pad_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
- pad_h, pad, unsigned int, 0u);
- unsigned int pad_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
- pad_w, pad, unsigned int, 0u);
-
- // Populate Weight and Bias Filter Descriptor
- Pooling2dDescriptor pooling2dDescriptor;
- if (param.has_pool())
- {
- PoolingParameter_PoolMethod p = param.pool();
- switch (p)
- {
- case PoolingParameter_PoolMethod_MAX:
- {
- pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
- break;
- }
- case PoolingParameter_PoolMethod_AVE:
- {
- pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
- break;
- }
- case PoolingParameter_PoolMethod_STOCHASTIC:
- {
- throw ParseException(
- fmt::format("Pooling Layer: Stochastic Pooling Not Supported. Layer={} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- default:
- {
- throw ParseException(
- fmt::format("Pooling Layer: unknown pooling method: {} for layer: {} {}",
- p,
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- }
- }
- else
- {
- throw ParseException(
- fmt::format("No Pooling Method Defined for {} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
-
- pooling2dDescriptor.m_PadLeft = pad_w;
- pooling2dDescriptor.m_PadRight = pad_w;
- pooling2dDescriptor.m_PadTop = pad_h;
- pooling2dDescriptor.m_PadBottom = pad_h;
- pooling2dDescriptor.m_StrideX = stride_w;
- pooling2dDescriptor.m_StrideY = stride_h;
- pooling2dDescriptor.m_PoolWidth = kernel_w;
- pooling2dDescriptor.m_PoolHeight = kernel_h;
-
- pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling;
- pooling2dDescriptor.m_PaddingMethod = PaddingMethod::IgnoreValue;
-
- armnn::IConnectableLayer* poolingLayer = m_Network->AddPooling2dLayer(pooling2dDescriptor,
- layerParam.name().c_str());
-
- TensorInfo outputInfo(
- { inputInfo.GetShape()[0],
- inputInfo.GetShape()[1],
- static_cast<unsigned int>(ceil(
- static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) /
- armnn::numeric_cast<float>(stride_h))) + 1,
- static_cast<unsigned int>(ceil(
- static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) /
- armnn::numeric_cast<float>(stride_w))) + 1 },
- DataType::Float32);
-
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(poolingLayer->GetInputSlot(0));
- poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), poolingLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseArgmaxLayer(const LayerParameter& layerParam)
-{
- ValidateNumInputsOutputs(layerParam, 1, 1);
- ArgMaxParameter param = layerParam.argmax_param();
-
- BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
-
- const unsigned int topK = param.has_top_k() ? param.top_k() : 1;
- if (topK != 1) {
- throw ParseException(
- fmt::format("ArgMaxLayer: Only support top_k equals to 1. Layer={} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
-
- const unsigned int outMaxVal = param.has_out_max_val() ? param.out_max_val() : false;
- if (outMaxVal) {
- throw ParseException(
- fmt::format("ArgMaxLayer: Does not support out_max_val. Layer={} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
-
- int axis = param.has_axis() ? param.axis() : 1;
- if (axis < 0) {
- axis = inputShape.dim_size() - axis;
- }
- if ((axis < 0) || (axis >= inputShape.dim_size())) {
- throw ParseException(
- fmt::format("ArgMaxLayer: Invalid axis value which outside range of input dims. "
- "{}'s input has input dim_size {}, requested axis: {}. {}",
- layerParam.name(),
- inputShape.dim_size(),
- axis,
- CHECK_LOCATION().AsString()));
- }
-
- ArgMinMaxDescriptor desc;
- desc.m_Axis = axis;
- desc.m_Function = ArgMinMaxFunction::Max;
-
- armnn::IConnectableLayer* argmaxLayer = m_Network->AddArgMinMaxLayer(desc,
- layerParam.name().c_str());
-
- TensorShape outputShape(static_cast<unsigned int>(inputShape.dim_size() - 1));
- int j = 0;
- // remove the flatten axis
- for (int i = 0; i < inputShape.dim_size(); ++i)
- {
- if (i == axis) continue;
- outputShape[static_cast<unsigned int>(j++)] = static_cast<unsigned int>(inputShape.dim(i));
- }
- TensorInfo outputInfo(outputShape, DataType::Signed32);
-
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(argmaxLayer->GetInputSlot(0));
- argmaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), argmaxLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseReluLayer(const LayerParameter& layerParam)
-{
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- const string& name = layerParam.name();
- const ReLUParameter& param = layerParam.relu_param();
-
- ActivationDescriptor activationDescriptor;
- const float negativeSlope = param.negative_slope();
- if (negativeSlope == 0.0f)
- {
- activationDescriptor.m_Function = ActivationFunction::ReLu;
- }
- else
- {
- activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
- activationDescriptor.m_A = negativeSlope;
- }
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
- IConnectableLayer* const activationLayer = m_Network->AddActivationLayer(activationDescriptor, name.c_str());
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(activationLayer->GetInputSlot(0));
- activationLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), activationLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseLRNLayer(const LayerParameter& layerParam)
-{
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- LRNParameter param = layerParam.lrn_param();
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- // Ignored BATCH NORMALIZATION Caffe Parameters.
- // Ignored MVN Caffe Parameters.
- // Ignored LRN Caffe Parameters.
- // Engine
-
- NormalizationDescriptor normalizationDescriptor;
- if (param.has_norm_region())
- {
- LRNParameter_NormRegion n = param.norm_region();
- switch (n)
- {
- case LRNParameter_NormRegion_ACROSS_CHANNELS:
- {
- normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
- break;
- }
- case LRNParameter_NormRegion_WITHIN_CHANNEL:
- {
- normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
- break;
- }
- default:
- {
- throw ParseException(
- fmt::format("Unknown region {} for LRN layer {} {}",
- n,
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- }
- }
- else
- {
- // Caffe defaults to normalization across channels.
- normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
- }
-
- normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
- if (param.has_local_size())
- {
- normalizationDescriptor.m_NormSize = param.local_size();
- }
- else
- {
- throw ParseException(
- fmt::format("local_size not defined for LRN layer {} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
-
- if (param.has_alpha())
- {
- normalizationDescriptor.m_Alpha = param.alpha();
- normalizationDescriptor.m_Alpha /= armnn::numeric_cast<float>(param.local_size());
- }
- else
- {
- throw ParseException(
- fmt::format("Alpha not defined for LRN layer {} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- if (param.has_beta())
- {
- normalizationDescriptor.m_Beta = param.beta();
- }
- else
- {
- throw ParseException(
- fmt::format("Beta not defined for LRN layer {} {}",
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
-
- if (param.has_k())
- {
- normalizationDescriptor.m_K = param.k();
- }
- else
- {
- normalizationDescriptor.m_K = 1;
- }
-
- IConnectableLayer* const normLayer = m_Network->AddNormalizationLayer(normalizationDescriptor,
- layerParam.name().c_str());
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(normLayer->GetInputSlot(0));
- normLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), normLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(const LayerParameter& layerParam)
-{
- InnerProductParameter param = layerParam.inner_product_param();
-
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- unsigned int outputSize = param.num_output();
-
- // Ignored Caffe Parameters:
- // Weight Filler
- // Bias Filler
- // Engine
- // Axis
-
- FullyConnectedDescriptor tensorFullyConnectedDescriptor;
-
- if (param.has_transpose())
- {
- // If true, assumes transposed weights.
- tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = param.transpose();
- }
- else
- {
- // Caffe defaults to transposed.
- tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = true;
- }
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- TensorInfo weightInfo;
- TensorInfo biasInfo;
-
- // Allows implicit flattening of extra dimensions.
- unsigned int inputSize = inputInfo.GetShape()[1];
- for (unsigned int i = 2; i < inputInfo.GetNumDimensions(); ++i)
- {
- inputSize *= inputInfo.GetShape()[i];
- }
-
- const float* weightDataPtr = GetArrayPtrFromBlob(layerParam, 0);
- const unsigned int swTD[2] = { outputSize, inputSize };
- ConstTensor weights(TensorInfo(2, swTD, DataType::Float32), weightDataPtr);
-
- tensorFullyConnectedDescriptor.m_BiasEnabled = true;
- // Todo: check whether bias enabled.
- armnn::IConnectableLayer* fullyConnectedLayer = nullptr;
- if (tensorFullyConnectedDescriptor.m_BiasEnabled)
- {
- // BIAS VALUE
- const float* biasDataPtr = GetArrayPtrFromBlob(layerParam, 1);
-
- const unsigned int sbTD[1] = { outputSize };
-
- ConstTensor biases(TensorInfo(1, sbTD, DataType::Float32), biasDataPtr);
-
- fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
- weights,
- Optional<ConstTensor>(biases),
- layerParam.name().c_str());
- }
- else
- {
- fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
- weights,
- EmptyOptional(),
- layerParam.name().c_str());
- }
-
- TensorInfo outputInfo({ inputInfo.GetShape()[0], outputSize }, DataType::Float32);
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(fullyConnectedLayer->GetInputSlot(0));
- fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), fullyConnectedLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseSoftmaxLayer(const LayerParameter& layerParam)
-{
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- SoftmaxParameter param = layerParam.softmax_param();
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- // Ignored Caffe Parameters:
- // axis
- // Engine
-
- armnn::SoftmaxDescriptor softmaxDescriptor;
- softmaxDescriptor.m_Axis = 1;
- armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(
- softmaxDescriptor,
- layerParam.name().c_str());
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(softmaxLayer->GetInputSlot(0));
- softmaxLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), softmaxLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseEltwiseLayer(const LayerParameter& layerParam)
-{
- ValidateNumInputsOutputs(layerParam, 2, 1);
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- // Ignored Caffe Parameters:
- // coeff
-
- EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM; // Defaults to sum as per caffe.
-
- if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation())
- {
- operation = layerParam.eltwise_param().operation();
- }
-
- armnn::IConnectableLayer* newLayer = nullptr;
- switch (operation)
- {
- case EltwiseParameter_EltwiseOp_SUM:
- {
- newLayer = m_Network->AddAdditionLayer(layerParam.name().c_str());
- break;
- }
- case EltwiseParameter_EltwiseOp_PROD:
- {
- newLayer = m_Network->AddMultiplicationLayer(layerParam.name().c_str());
- break;
- }
- default:
- {
- throw ParseException(
- fmt::format("Unsupported operation {} in Eltwise layer {} {}",
- operation,
- layerParam.name(),
- CHECK_LOCATION().AsString()));
- }
- }
-
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(newLayer->GetInputSlot(0));
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(1)).Connect(newLayer->GetInputSlot(1));
- newLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), newLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseConcatLayer(const LayerParameter& layerParam)
-{
- unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size());
- // We assume concat happens along the channel dimension, which is 1 in (0, 1, 2, 3).
- unsigned int concatDim = 1;
- unsigned int numOfDims = 4;
-
- // we only consider 4-D tensor here
- OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);
- std::vector<unsigned int>mergeDimSizes(numOfDims, 0u);
-
- unsigned int mergeDim = 0;
- for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
- {
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
- layerParam.bottom(armnn::numeric_cast<int>(viewIndex))).GetTensorInfo();
- // Checks whether the dimensions of the input tensors are actually 4.
- if (inputInfo.GetNumDimensions()!=4)
- {
- throw ParseException(
- fmt::format("The number of dimensions for input tensors of "
- "the concatenation op should be 4. Inputs of {} has "
- "{} dimensions. {}",
- layerParam.name(),
- inputInfo.GetNumDimensions(),
- CHECK_LOCATION().AsString()));
- }
-
- mergeDimSizes[0] = inputInfo.GetShape()[0];
- mergeDimSizes[1] = inputInfo.GetShape()[1];
- mergeDimSizes[2] = inputInfo.GetShape()[2];
- mergeDimSizes[3] = inputInfo.GetShape()[3];
-
- for (unsigned int j = 0; j < concatDim; ++j)
- {
- concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
- }
-
- concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
- mergeDim += mergeDimSizes[concatDim];
-
- for (unsigned int j = concatDim+1; j < numOfDims; ++j)
- {
- concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
- }
- }
- mergeDimSizes[concatDim] = mergeDim;
-
- armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
- for (unsigned int i = 0; i < numInputs; ++i)
- {
- armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(armnn::numeric_cast<int>(i)));
- outputSlot.Connect(concatlayer->GetInputSlot(i));
- }
-
- concatlayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(numOfDims, mergeDimSizes.data(), DataType::Float32));
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatlayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseBatchNormLayer(const LayerParameter& layerParam)
-{
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- string name = layerParam.name();
-
- BatchNormParameter param = layerParam.batch_norm_param();
- // If use_global_stats is not explicitly set in the model, assume it to be true (its default value
- // when the network is in the testing phase).
- if (param.has_use_global_stats())
- {
- if (!param.use_global_stats())
- {
- throw ParseException(
- fmt::format("Error parsing Batch Norm layer '{}': "
- "Parameter 'use_global_stats' is set to false, which is "
- "unsupported (value used for training). {}",
- name,
- CHECK_LOCATION().AsString()));
- }
- }
-
- BatchNormalizationDescriptor desc;
- desc.m_Eps = param.eps();
-
- unsigned int channels = inputInfo.GetShape()[1];
- unsigned int shape[] = {channels};
-
- vector<float> meanData(channels);
- GetDataFromBlob(layerParam, meanData, 0);
-
- vector<float> varianceData(channels);
- GetDataFromBlob(layerParam, varianceData, 1);
-
- // Reads moving average factor and applies scaling (if required).
- const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(2));
- const float movingAverageFactor = blob.data(armnn::numeric_cast<int>(0));
- if(movingAverageFactor != 0.0f)
- {
- const float scaleFactor = 1.0f / movingAverageFactor;
- auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };
-
- std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
- std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
- }
-
- // Identifies scale operation.
- vector<float> betaData(channels, 0.0f);
- vector<float> gammaData(channels, 1.0f);
-
- ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
- ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
- ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
- ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);
-
- armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
- mean, variance, beta, gamma, name.c_str());
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
- batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseScaleLayer(const LayerParameter& layerParam)
-{
- // Current unoptimal solution: add a batchnormalization layer with 0 mean and 1 variance.
- ValidateNumInputsOutputs(layerParam, 1, 1);
-
- const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
-
- string name = layerParam.name();
-
- ScaleParameter param = layerParam.scale_param();
- if (param.axis() != 1)
- {
- // Would have to use something other than BatchNormalizationLayer in this case
- throw ParseException(
- fmt::format("Loading Scale Layer: Only axis 1 is supported currently. "
- "Layer={} Axis={} {}",
- layerParam.name(),
- param.axis(),
- CHECK_LOCATION().AsString()));
- }
-
- unsigned int channels = inputInfo.GetShape()[1];
- unsigned int shape[] = {channels};
-
- BatchNormalizationDescriptor desc;
- desc.m_Eps = 0.0f; // Don't need epsilon if variance is 1.
- vector<float> meanData(channels, 0.0f);
- vector<float> varianceData(channels, 1.0f);
- vector<float> betaData(channels, 0.0f);
- vector<float> gammaData(channels);
-
- GetDataFromBlob(layerParam, gammaData, 0);
-
- if(param.has_bias_term())
- {
- GetDataFromBlob(layerParam, betaData, 1);
- }
-
- ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
- ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
- ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
- ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);
-
- armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
- mean, variance, beta, gamma, name.c_str());
- GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
- batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
-}
-
-void ICaffeParser::CaffeParserImpl::ParseSplitLayer(const caffe::LayerParameter& layerParam)
-{
- // Used in caffe to duplicate memory - not necessary in armnn.
- if (layerParam.bottom_size() != 1)
- {
- throw ParseException(
- fmt::format("Split layer '{}' should have exactly 1 bottom. "
- "#bottoms={} {}",
- layerParam.name(),
- layerParam.bottom_size(),
- CHECK_LOCATION().AsString()));
- }
- armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
- for (int i = 0; i < layerParam.top_size(); i++)
- {
- SetArmnnOutputSlotForCaffeTop(layerParam.top(i), outputSlot);
- }
-}
-
-void ICaffeParser::CaffeParserImpl::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
-{
- // Ignored for inference, so patch the single input to its single output.
- if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
- {
- throw ParseException(
- fmt::format("Dropout layer '{}' should have exactly 1 bottom and 1 top. "
- "#bottoms={} #tops={} {}",
- layerParam.name(),
- layerParam.bottom_size(),
- layerParam.top_size(),
- CHECK_LOCATION().AsString()));
- }
- SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
-}
-
-void ICaffeParser::CaffeParserImpl::TrackInputBinding(armnn::IConnectableLayer* layer,
- armnn::LayerBindingId id,
- const armnn::TensorInfo& tensorInfo)
-{
- return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkInputsBindingInfo);
-}
-
-void ICaffeParser::CaffeParserImpl::TrackOutputBinding(armnn::IConnectableLayer* layer,
- armnn::LayerBindingId id,
- const armnn::TensorInfo& tensorInfo)
-{
- return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkOutputsBindingInfo);
-}
-
-void ICaffeParser::CaffeParserImpl::TrackBindingPoint(armnn::IConnectableLayer* layer,
- armnn::LayerBindingId id,
- const armnn::TensorInfo& tensorInfo,
- const char* bindingPointDesc,
- std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
-{
- const std::string layerName = layer->GetName();
- auto it = nameToBindingInfo.find(layerName);
- if (it == nameToBindingInfo.end())
- {
- nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
- }
- else
- {
- throw ParseException(
- fmt::format("Id {} used by more than one {} layer {}",
- id,
- bindingPointDesc,
- CHECK_LOCATION().AsString()));
- }
-}
-
-armnn::IOutputSlot& ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const
-{
- auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
- if (it != m_ArmnnOutputSlotForCaffeTop.end())
- {
- return *it->second;
- }
- else
- {
- throw ParseException(
- fmt::format("Could not find armnn output slot for Caffe top '{}' {}",
- caffeTopName,
- CHECK_LOCATION().AsString()));
- }
-}
-
-void ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(
- const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot)
-{
- auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
- if (it == m_ArmnnOutputSlotForCaffeTop.end())
- {
- m_ArmnnOutputSlotForCaffeTop[caffeTopName] = &armnnOutputSlot;
- }
- else
- {
- throw ParseException(
- fmt::format("Attempting to add duplicate entry for Caffe top '{}' {}",
- caffeTopName,
- CHECK_LOCATION().AsString()));
- }
-}
-
-// Note: can move to CaffeParser when/if we optimise the text/string format
-// to load on a layer by layer basis
-void ICaffeParser::CaffeParserImpl::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
-{
- // Finds layers with the same top.
- std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
- for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
- {
- caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx);
- std::string name = layer.name();
- for (int i = 0; i < layer.top_size(); ++i)
- {
- layersByTop[layer.top(i)].push_back(&layer);
- }
- }
-
- // For each set of layers with the same top, resolves them to a linear chain rather than in-place layers.
- // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op.
- for (auto layersWithSameTopIt : layersByTop)
- {
- const std::string& top = layersWithSameTopIt.first;
- const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second;
-
- // Chains the layers together in the order that they are listed in the prototxt (hopefully this is correct).
- // Note that the last layer will not have its top modified so that other layers will continue to reference it.
- for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
- {
- caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx];
- caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx+1];
- if (layer1.top_size() != 1)
- {
- throw ParseException(
- fmt::format("Node '{}' is an in-place layer but doesn't have exactly one "
- "top. It has {} instead. {}",
- layer1.name(),
- layer1.top_size(),
- CHECK_LOCATION().AsString()));
- }
- std::string newTop = layer1.name() + "_top";
- layer1.set_top(0, newTop);
- if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
- {
- throw ParseException(
- fmt::format("Node '{}' is an in-place layer but "
- "doesn't have exactly one bottom, or it doesn't match its top. "
- "#bottoms={}, first bottom is {}, top is {} {}",
- layer2.name(),
- layer2.bottom(0),
- top,
- CHECK_LOCATION().AsString()));
- }
- layer2.set_bottom(0, newTop);
- }
- }
-}
-
-// Note: can move to CaffeParser when/if we optimise the text/string format
-// to load on a layer by layer basis
-void ICaffeParser::CaffeParserImpl::LoadNetParam(NetParameter& netParameter)
-{
- // Caffe models sometimes have an implicit input layer.
- // In that case, add an explicit one.
- if (netParameter.input_size() > 0)
- {
- LayerParameter* newLayer = netParameter.add_layer();
-
- newLayer->set_type("Input");
- newLayer->set_name(netParameter.input(0));
- newLayer->add_top(netParameter.input(0));
-
- InputParameter* inputParam = newLayer->mutable_input_param();
- BlobShape* shape = inputParam->add_shape();
-
- int dim_size = netParameter.input_dim_size();
- for (int i = 0; i < dim_size; ++i)
- {
- shape->add_dim(netParameter.input_dim(i));
- }
- }
-
- // Replaces in-place layers with regular ones to make the rest of the parsing easier.
- ResolveInPlaceLayers(netParameter);
-
- // Creates a lookup of Caffe layers by name.
- for (int i = 0; i < netParameter.layer_size(); ++i)
- {
- const caffe::LayerParameter& layer = netParameter.layer(i);
- for (int i = 0; i < layer.top_size(); ++i)
- {
- m_CaffeLayersByTopName[layer.top(i)] = &layer;
- }
- }
-
- // Finds the output layers the user requested.
- std::vector<const caffe::LayerParameter*> targetLayers;
- for (const std::string& requestedOutputName : m_RequestedOutputs)
- {
- auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
- if (nodeIt == m_CaffeLayersByTopName.end())
- {
- throw ParseException(
- fmt::format("Couldn't find requested output layer '{}' in graph {}",
- requestedOutputName,
- CHECK_LOCATION().AsString()));
- }
- targetLayers.push_back(nodeIt->second);
- }
-
- // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
- std::vector<const caffe::LayerParameter*> sortedNodes;
- if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>(
- targetLayers,
- [this](const caffe::LayerParameter* node)
- {
- return GetInputs(*node);
- },
- sortedNodes))
- {
- throw ParseException(
- fmt::format("Cycle detected in graph. #nodes: {} {}",
- sortedNodes.size(),
- CHECK_LOCATION().AsString()));
- }
-
- // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
- for (const caffe::LayerParameter* current : sortedNodes)
- {
- auto it = ms_CaffeLayerNameToParsingFunctions.find(current->type());
- if (it == ms_CaffeLayerNameToParsingFunctions.end())
- {
- throw ParseException(
- fmt::format("Unsupported layer type: '{}' for layer {} {}",
- current->type(),
- current->name(),
- CHECK_LOCATION().AsString()));
- }
- auto func = it->second;
- (this->*func)(*current);
- }
-
- // Adds ArmNN output layers connected to each requested output.
- for (const std::string& requestedOutput : m_RequestedOutputs)
- {
- armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
-
- const armnn::LayerBindingId outputId = armnn::numeric_cast<armnn::LayerBindingId>(
- m_NetworkOutputsBindingInfo.size());
- armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
- outputSlot.Connect(outputLayer->GetInputSlot(0));
-
- TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
- }
-}
-
-INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromTextFile(const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- FILE* fd = fopen(graphFile, "r");
-
- if (fd == nullptr)
- {
- throw FileNotFoundException(
- fmt::format("Failed to open graph file: {} {}",
- graphFile,
- CHECK_LOCATION().AsString()));
- }
-
- // Parses the file into a message.
- NetParameter netParam;
- auto input = new google::protobuf::io::FileInputStream(fileno(fd));
- bool success = google::protobuf::TextFormat::Parse(input, &netParam);
- delete input;
- fclose(fd);
-
- if (!success)
- {
- throw ParseException(
- fmt::format("Failed to parse graph file: {} {}",
- graphFile,
- CHECK_LOCATION().AsString()));
- }
-
- return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
-}
-
-INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromString(const char* protoText,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- // Parses the string into a message.
- NetParameter netParam;
- bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam);
-
- if (!success)
- {
- throw ParseException(
- fmt::format("Failed to parse graph string {}",
- CHECK_LOCATION().AsString()));
- }
-
- return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
-}
-
-INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- FILE* fd = fopen(graphFile, "rb");
-
- if (fd == nullptr)
- {
- throw FileNotFoundException(
- fmt::format("Failed to open graph file at: {} {}",
- graphFile,
- CHECK_LOCATION().AsString()));
- }
-
- // Parses the file into a message.
- NetParameter netParam;
-
- FileInputStream inStream(fileno(fd));
- CodedInputStream codedStream(&inStream);
- codedStream.SetTotalBytesLimit(INT_MAX);
- bool success = netParam.ParseFromCodedStream(&codedStream);
- fclose(fd);
-
- if (!success)
- {
- throw ParseException(
- fmt::format("Failed to parse protobuf file: {} {}",
- graphFile,
- CHECK_LOCATION().AsString()));
- }
-
- return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
-}
-
-// Note: can move to CaffeParser when/if we optimise the text/string format
-// to load on a layer by layer basis
-INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter(NetParameter& netParam,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
- m_NetworkInputsBindingInfo.clear();
- m_NetworkOutputsBindingInfo.clear();
-
- m_Network = INetwork::Create();
-
- m_InputShapes = inputShapes;
- if (requestedOutputs.size() == 0)
- {
- throw ParseException("requestedOutputs must have at least one entry");
- }
- m_RequestedOutputs = requestedOutputs;
-
- try
- {
- LoadNetParam(netParam);
- }
- catch (const ParseException& e)
- {
- Cleanup();
- throw e;
- }
-
- Cleanup();
-
- return move(m_Network);
-}
-
-const std::string ICaffeParser::CaffeParserImpl::GetVersion()
-{
- return CAFFE_PARSER_VERSION;
-}
-
-void ICaffeParser::CaffeParserImpl::Cleanup() {
- // cleanup, in case we reuse this parser
- m_InputShapes.clear();
- m_RequestedOutputs.clear();
- m_ArmnnOutputSlotForCaffeTop.clear();
- // NOTE: when we get the text/string format
- // optimised for memory then this data structure can
- // also move to the CaffeParser class
- m_CaffeLayersByTopName.clear();
-}
-
-}
diff --git a/src/armnnCaffeParser/CaffeParser.hpp b/src/armnnCaffeParser/CaffeParser.hpp
deleted file mode 100644
index 9f93569742..0000000000
--- a/src/armnnCaffeParser/CaffeParser.hpp
+++ /dev/null
@@ -1,189 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-#include "armnn/Types.hpp"
-#include "armnn/NetworkFwd.hpp"
-#include "armnn/Tensor.hpp"
-
-#include <memory>
-#include <vector>
-#include <unordered_map>
-
-namespace caffe
-{
-class BlobShape;
-class LayerParameter;
-class NetParameter;
-}
-
-namespace armnnCaffeParser
-{
-
-class ICaffeParser::CaffeParserImpl
-{
-public:
-
- // Because we haven't looked at reducing the memory usage when loading from Text/String
- // have to retain these functions here for the moment.
- /// Create the network from a protobuf text file on disk
- armnn::INetworkPtr CreateNetworkFromTextFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs);
-
- /// Create the network from a protobuf binary file on the disk.
- virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs) = 0;
-
-
- /// Creates the network directly from protobuf text in a string. Useful for debugging/testing.
- armnn::INetworkPtr CreateNetworkFromString(
- const char* protoText,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs);
-
- /// Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.
- BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
-
- /// Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.
- BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
-
- /// Retrieve version in X.Y.Z form
- static const std::string GetVersion();
-
- CaffeParserImpl();
- virtual ~CaffeParserImpl() = default;
-
-protected:
- /// Adds an armnn layer to m_Network given a Caffe LayerParameter of the correct type
- /// and is responsible for recording any newly created IOutputSlots using SetArmnnOutputSlotForCaffeTop().
- /// @{
- void ParseInputLayer(const caffe::LayerParameter& layerParam);
- void ParseConvLayer(const caffe::LayerParameter& layerParam);
- void ParseDeconvLayer(const caffe::LayerParameter& layerParam);
- void ParsePoolingLayer(const caffe::LayerParameter& layerParam);
- void ParseReluLayer(const caffe::LayerParameter& layerParam);
- void ParseLRNLayer(const caffe::LayerParameter& layerParam);
- void ParseInnerProductLayer(const caffe::LayerParameter& layerParam);
- void ParseSoftmaxLayer(const caffe::LayerParameter& layerParam);
- void ParseEltwiseLayer(const caffe::LayerParameter& layerParam);
- void ParseConcatLayer(const caffe::LayerParameter& layerParam);
- void ParseBatchNormLayer(const caffe::LayerParameter& layerParam);
- void ParseScaleLayer(const caffe::LayerParameter& layerParam);
- void ParseSplitLayer(const caffe::LayerParameter& layerParam);
- void ParseDropoutLayer(const caffe::LayerParameter& layerParam);
- void ParseArgmaxLayer(const caffe::LayerParameter& layerParam);
- /// @}
-
- /// ParseConv may use these helpers depending on the group parameter
- /// @{
- void AddConvLayerWithSplits(const caffe::LayerParameter& layerParam,
- const armnn::Convolution2dDescriptor & desc,
- unsigned int kernelW,
- unsigned int kernelH);
- void AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam,
- const armnn::Convolution2dDescriptor & desc,
- unsigned int kernelW,
- unsigned int kernelH);
- void AddDeconvLayerWithSplits(const caffe::LayerParameter& layerParam,
- const armnn::TransposeConvolution2dDescriptor& desc,
- unsigned int kernelW,
- unsigned int kernelH);
- /// @}
-
- /// Converts Caffe's protobuf tensor shape format to ArmNN's
- armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const;
-
- void TrackInputBinding(armnn::IConnectableLayer* layer,
- armnn::LayerBindingId id,
- const armnn::TensorInfo& tensorInfo);
-
- static void TrackBindingPoint(armnn::IConnectableLayer* layer, armnn::LayerBindingId id,
- const armnn::TensorInfo& tensorInfo,
- const char* bindingPointDesc,
- std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo);
-
- void TrackOutputBinding(armnn::IConnectableLayer* layer,
- armnn::LayerBindingId id,
- const armnn::TensorInfo& tensorInfo);
-
-
- void SetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot);
-
- /// Retrieves the Armnn IOutputSlot representing the given Caffe top.
- /// Throws if it cannot be found (e.g. not parsed yet).
- armnn::IOutputSlot& GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const;
-
- static std::pair<armnn::LayerBindingId, armnn::TensorInfo> GetBindingInfo(
- const std::string& layerName,
- const char* bindingPointDesc,
- const std::unordered_map<std::string, BindingPointInfo>& bindingInfos);
-
-
- void Cleanup();
-
- using OperationParsingFunction = void(CaffeParserImpl::*)(const caffe::LayerParameter& layerParam);
-
- /// Maps Caffe layer names to parsing member functions.
- static const std::map<std::string, OperationParsingFunction> ms_CaffeLayerNameToParsingFunctions;
-
- /// maps input layer names to their corresponding ids and tensor infos
- std::unordered_map<std::string, BindingPointInfo> m_NetworkInputsBindingInfo;
-
- /// maps output layer names to their corresponding ids and tensor infos
- std::unordered_map<std::string, BindingPointInfo> m_NetworkOutputsBindingInfo;
-
- armnn::INetworkPtr m_Network;
-
- std::map<std::string, armnn::TensorShape> m_InputShapes;
-
- /// As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops.
- std::unordered_map<std::string, armnn::IOutputSlot*> m_ArmnnOutputSlotForCaffeTop;
-
- std::vector<std::string> m_RequestedOutputs;
-
-
- // Stuff which has gone to base class simply because we haven't done any
- // memory optimisation on the text/string format. If we move this to a layer
- // by layer parse as well these can move to the CaffeParser class.
- std::map<std::string, const caffe::LayerParameter*> m_CaffeLayersByTopName;
-
- /// Parses a NetParameter loaded into memory from one of the other CreateNetwork*
- armnn::INetworkPtr CreateNetworkFromNetParameter(
- caffe::NetParameter& netParam,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs);
-
- /// does the actual conversion from caffe::NetParameter to armnn::INetwork
- void LoadNetParam(caffe::NetParameter& netParameter);
-
- /// Find the Caffe layers listed as inputs (bottoms) for a given layer.
- std::vector<const caffe::LayerParameter*> GetInputs(const caffe::LayerParameter& layerParam);
-
- /// Modifies the Caffe network to replace "in-place" layers (whose top() and bottom() are both the same)
- /// with regular layers. This simplifies further parsing.
- void ResolveInPlaceLayers(caffe::NetParameter& netParameter);
-
-};
-
-class CaffeParser : public ICaffeParser::CaffeParserImpl
-{
-public:
-
- /// Create the network from a protobuf binary file on disk
- virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs) override;
-
-public:
- CaffeParser();
-
-};
-}
diff --git a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp b/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
deleted file mode 100644
index b7ff3d8731..0000000000
--- a/src/armnnCaffeParser/RecordByRecordCaffeParser.cpp
+++ /dev/null
@@ -1,731 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RecordByRecordCaffeParser.hpp"
-
-#include "armnn/Exceptions.hpp"
-#include "armnn/Utils.hpp"
-#include <armnn/utility/NumericCast.hpp>
-
-#include "GraphTopologicalSort.hpp"
-
-// Caffe
-#include <google/protobuf/wire_format.h>
-
-
-//#include <stdio.h>
-#include <limits.h>
-#include <sstream>
-//#include <iostream>
-#include <fstream>
-
-namespace armnnCaffeParser
-{
-// class which holds information on the absolute position in the stream
-// of the data and the length of the data record.
-class VarLenDataInfo
-{
-public:
- VarLenDataInfo(std::streamoff positionOfData, size_t sizeOfData) :
- m_PositionOfData(positionOfData), m_SizeOfData(sizeOfData) {}
-
- VarLenDataInfo(const VarLenDataInfo& x) :
- m_PositionOfData(x.PositionOfData()), m_SizeOfData (x.SizeOfData()) {}
-
- VarLenDataInfo& operator=(const VarLenDataInfo& x)
- {
- // handle self assignment
- if (this == &x) {
- return *this;
- }
- m_PositionOfData = x.PositionOfData(); m_SizeOfData = x.SizeOfData(); return *this;
- }
-
- std::streamoff PositionOfData() const {return m_PositionOfData;}
- size_t SizeOfData() const {return m_SizeOfData;}
-
-private:
- std::streamoff m_PositionOfData;
- size_t m_SizeOfData;
-
-};
-
-// class which holds enough information on a LayerParameter in the Caffe protobuf
-// format to allow it to be resolved for in place layering and sorted topologically
-// prior to the entire record being parsed into memory.
-//
-// NOTE: function naming follows that of the protobuf classes these proxies are standing in for
-class LayerParameterInfo : public VarLenDataInfo
-{
-public:
- static const std::string INPUT;
- LayerParameterInfo(const VarLenDataInfo& varLenDataInfo) :
- VarLenDataInfo(varLenDataInfo.PositionOfData(), varLenDataInfo.SizeOfData()),
- m_newTops(false), m_newBottoms(false) {}
-
- LayerParameterInfo(std::streamoff positionOfData, size_t sizeOfData) :
- VarLenDataInfo(positionOfData, sizeOfData), m_newTops(false), m_newBottoms(false) {}
-
- LayerParameterInfo(const LayerParameterInfo& x) :
- VarLenDataInfo(x.PositionOfData(), x.SizeOfData()),
- m_name(x.m_name),
- m_type(x.m_type),
- m_tops(x.m_tops),
- m_bottoms(x.m_bottoms),
- m_newTops(x.m_newTops),
- m_newBottoms(x.m_newBottoms) {}
-
- LayerParameterInfo& operator=(const LayerParameterInfo& x)
- {
- if (this == &x) {
- return *this;
- }
- VarLenDataInfo::operator=(x);
- m_name = x.m_name;
- m_type = x.m_type;
- m_tops = x.m_tops;
- m_bottoms = x.m_bottoms;
- m_newTops = x.m_newTops;
- m_newBottoms = x.m_newBottoms;
- return *this;
- }
-
- const std::string name() const {return m_name;}
- void set_name(const std::unique_ptr<char[]>& theName, size_t length)
- {
- m_name = std::string(theName.get(), length);
- }
- void set_name(const std::string& theName) {m_name = theName;}
-
- const std::string type() const {return m_type;}
- void set_type(const std::unique_ptr<char[]>& theType, size_t length)
- {
- m_type = std::string(theType.get(), length);
- }
- void set_type(const std::string& theType) {m_type = theType;}
-
- void add_top(const std::unique_ptr<char[]>& top, size_t length)
- {
- std::string topName(top.get(), length);
- m_tops.push_back(topName);
- }
- void add_top(const std::string& topName)
- {
- m_tops.push_back(topName);
- }
- const std::string top(unsigned long i) const {return m_tops[i];}
- unsigned long top_size() const {return m_tops.size();}
- void set_top(unsigned long i, const std::string& newName) {m_tops[i] = newName; m_newTops = true;}
- bool new_tops() const {return m_newTops;}
-
- void add_bottom(const std::unique_ptr<char[]>& bottom, size_t length)
- {
- std::string bottomName(bottom.get(), length);
- m_bottoms.push_back(bottomName);
- }
- unsigned long bottom_size() const {return m_bottoms.size();}
- const std::string bottom(unsigned long i) const {return m_bottoms[i];}
- void set_bottom(unsigned long i, const std::string& newName) {m_bottoms[i] = newName; m_newBottoms = true;}
- bool new_bottoms() const {return m_newBottoms;}
-
- // if the position and size of the data is zero and the type is "Input" then this is an 'Implicit Input Layer'
- // and needs to be handled differently from ordinary layers.
- bool isImplicitInputLayer() const
- {
- if ((PositionOfData() == 0) && (SizeOfData() == 0) && INPUT.compare(type()) == 0)
- {return true;} else {return false;}
- }
-
-private:
- std::string m_name;
- std::string m_type;
- std::vector<std::string> m_tops;
- std::vector<std::string> m_bottoms;
- // mark the layers whose topology was changed
- // by the ResolveInPlaceLayers method.
- bool m_newTops;
- bool m_newBottoms;
-};
-
-// class which holds the field type (wire type) and field id (id from the .proto schema)
-// read from the protobuf messages as per the binary encoding described in
-// https://developers.google.com/protocol-buffers/docs/encoding
-//
-// NOTE: function naming follows that of the protobuf classes these proxies are standing in for
-class ProtobufFieldInfo
-{
-public:
- ProtobufFieldInfo(int field_type, int field_id) :
- m_eof(false), m_field_type(field_type), m_field_id(field_id) {}
- ProtobufFieldInfo() : m_eof(true), m_field_type(0), m_field_id(0) {}
-
- bool eof() {return m_eof;}
- int field_type() {return m_field_type;}
- int field_id() {return m_field_id;}
-
-private:
- bool m_eof;
- int m_field_type;
- int m_field_id;
-};
-
-
-// There are some NetParameter level data which are required
-// to correctly processes some Caffe models. Specifically those which
-// have 'implicit' input layers. Also it is nice to have the name of the model.
-//
-// NOTE: function naming follows that of the protobuf classes these proxies are standing in for
-class NetParameterInfo
-{
-public:
- const std::string name() const {return m_name;}
- void set_name(const std::unique_ptr<char[]>& theName, size_t length)
- {
- m_name = std::string(theName.get(), length);
- }
-
- void add_input(const std::unique_ptr<char[]>& input, size_t length)
- {
- std::string inputName(input.get(), length);
- m_inputs.push_back(inputName);
- }
- const std::string input(unsigned long i) const {return m_inputs[i];}
- unsigned long input_size() const {return m_inputs.size();}
-
- void add_input_dimension(int input_dimension) {
- m_input_dimensions.push_back(input_dimension);
- }
- int input_dimension(unsigned long i) const {return m_input_dimensions[i];}
- unsigned long input_dimensions_size() const {return m_input_dimensions.size();}
-
- void add_blob_shape(caffe::BlobShape shape) {
- m_blob_shapes.push_back(shape);
- }
- const caffe::BlobShape blob_shape(unsigned long i) const {return m_blob_shapes[i];}
- unsigned long blob_shapes_size() const {return m_blob_shapes.size();}
-
-private:
- std::string m_name;
- std::vector<std::string> m_inputs;
- std::vector<int> m_input_dimensions;
- std::vector<caffe::BlobShape> m_blob_shapes;
-
-};
-
-}; // namespace armnnCaffeParser
-
-using namespace armnnCaffeParser;
-
-// Initialise the class const
-const std::string LayerParameterInfo::INPUT = "Input";
-
-namespace
-{
-
-ProtobufFieldInfo readFieldInfo(std::ifstream& ifs)
-{
- unsigned char first_byte = static_cast<unsigned char>(ifs.get());
- if (!ifs.good())
- {
- ProtobufFieldInfo eof;
- return eof;
- }
- int field_type = first_byte&7;
- int field_id = first_byte>>3;
- if ((field_id & 16) == 16)
- {
- unsigned char second_byte = static_cast<unsigned char>(ifs.get());
- if (!ifs.good())
- {
- ProtobufFieldInfo eof;
- return eof;
- }
- field_id = (field_id-16) + ((second_byte&127)<<4);
- }
- ProtobufFieldInfo fieldInfo(field_type, field_id);
- return fieldInfo;
-}
-
-const static int MAX_NUM_BYTES = 5;
-
-int ReadBase128(std::ifstream& ifs)
-{
- int result = 0;
- unsigned int shift_by = 0;
- int bytesRead = 0;
- while (true)
- {
- unsigned char a_byte = static_cast<unsigned char>(ifs.get());
- ++bytesRead;
- if (bytesRead > MAX_NUM_BYTES)
- {
- throw armnn::ParseException(
- "ReadBase128 exceeded the maximum number of bytes expected for an integer representation");
- }
- result += (a_byte & 127) << shift_by;
- shift_by += 7;
- if ((a_byte & 128) != 128)
- {
- break;
- }
- }
- return result;
-}
-
-
-std::unique_ptr<char[]> AllocateBuffer(std::ifstream& ifs, VarLenDataInfo& dataInfo)
-{
- std::unique_ptr<char[]> ptr(new char[dataInfo.SizeOfData()]);
- ifs.clear();
- ifs.seekg(dataInfo.PositionOfData(), std::ios_base::beg);
- ifs.read(ptr.get(), armnn::numeric_cast<std::streamsize>(dataInfo.SizeOfData()));
- return ptr;
-}
-
-VarLenDataInfo CreateVarLenDataInfo(std::streamoff bufferStart, std::streamoff endOfLayer) {
- std::streamoff sizeOfLayer = endOfLayer - bufferStart;
- if (sizeOfLayer < 0)
- {
- std::stringstream ss;
- ss << "error when determining buffer size, negative value [" << sizeOfLayer << "]";
- throw armnn::ParseException(ss.str());
- }
- // NOTE: as some of the data being read in will be translated into strings (names of layers etc)
- // the maximum size we can deal with is the upper size limit of a string i.e. size_t
- // on the platform in which I am currently compiling std::streamoff is signed long int and
- // size_t is unsigned long int so there is no way this error condition can fire but this stuff
- // is supposed to be portable so the check remains in place
- if (armnn::numeric_cast<size_t>(sizeOfLayer) > SIZE_MAX) {
- std::stringstream ss;
- ss << "layer is greater than " << SIZE_MAX << " in size cannot process. layer size = [" << sizeOfLayer << "]";
- throw armnn::ParseException(ss.str());
- }
- LayerParameterInfo info(bufferStart, armnn::numeric_cast<size_t>(sizeOfLayer));
- return info;
-}
-
-void ReadTopologicalInfoForLayerParameter(LayerParameterInfo& layerInfo, std::ifstream& ifs)
-{
- // position the file pointer to the start of the layer data
- ifs.clear();
- ifs.seekg(layerInfo.PositionOfData(), std::ios_base::beg);
- std::streamoff endOfLayer = layerInfo.PositionOfData() +
- armnn::numeric_cast<std::streamoff>(layerInfo.SizeOfData());
- while(true)
- {
- // check to see if we have reached the end of the record
- std::streamoff currentPosition = ifs.tellg();
- if (currentPosition >= endOfLayer) {
- return;
- }
- // read the information for the next field.
- ProtobufFieldInfo fieldInfo = readFieldInfo(ifs);
- if (fieldInfo.eof())
- {
- return;
- // TODO: figure out whether this is an error condition or not...
- //throw armnn::ParseException("failed to read field from LayerParameter data");
- }
- // process the field
- switch (fieldInfo.field_type())
- {
- case 0:
- {
- ReadBase128(ifs);
- break;
- }
- case 2:
- {
- int size = ReadBase128(ifs);
- std::streamoff posStartOfData = ifs.tellg();
- VarLenDataInfo dataInfo(posStartOfData, armnn::numeric_cast<size_t>(size));
- //optional string name = 1; // the layer name
- //optional string type = 2; // the layer type
- //repeated string bottom = 3; // the name of each bottom blob
- //repeated string top = 4; // the name of each top blob
- if (fieldInfo.field_id() == 1)
- {
- // read and set the name of the layer
- auto layerName = AllocateBuffer(ifs, dataInfo);
- layerInfo.set_name(layerName, dataInfo.SizeOfData());
- }
- else if (fieldInfo.field_id() == 2)
- {
- // read and set the type of the layer
- auto layerType = AllocateBuffer(ifs, dataInfo);
- layerInfo.set_type(layerType, dataInfo.SizeOfData());
- }
- else if (fieldInfo.field_id() == 3)
- {
- // read and add a bottom to the layer
- auto bottom = AllocateBuffer(ifs, dataInfo);
- layerInfo.add_bottom(bottom, dataInfo.SizeOfData());
- }
- else if (fieldInfo.field_id() == 4)
- {
- // read and add a top to the layer
- auto top = AllocateBuffer(ifs, dataInfo);
- layerInfo.add_top(top, dataInfo.SizeOfData());
- }
- else
- {
- ifs.seekg(size, std::ios_base::cur);
- if (!ifs.good())
- {
- // TODO: error out?
- return;
- }
- }
- break;
- }
- case 1:
- {
- // 64 bit
- // advance by eight bytes
- ifs.seekg(8, std::ios_base::cur);
- if (!ifs.good())
- {
- // TODO: error out?
- return;
- }
- break;
- }
- case 5:
- {
- // 32 bit
- // advance by four bytes
- ifs.seekg(4, std::ios_base::cur);
- if (!ifs.good())
- {
- // TODO: error out?
- return;
- }
- break;
- }
- default:
- {
- throw armnn::ParseException("Encounted an unknown field type");
- break;
- }
- }
- }
-}
-
-void ResolveInPlaceLayers(std::vector<LayerParameterInfo>& layerInfo)
-{
- std::map<std::string, std::vector<LayerParameterInfo*>> layersByTop;
- for (auto& info : layerInfo)
- {
- for (unsigned long i = 0; i < info.top_size(); ++i)
- {
- layersByTop[info.top(i)].push_back(&info);
- }
- }
- // For each set of layers with the same top, resolve them to a linear chain rather than in-place layers.
- // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op.
- for (auto& layersWithSameTopIterator : layersByTop)
- {
- const std::string& top = layersWithSameTopIterator.first;
- const std::vector<LayerParameterInfo*> layersWithSameTop = layersWithSameTopIterator.second;
-
- // Chain the layers together in the order that they are listed in the prototxt (hopefully this is correct).
- // Note that the last layer will not have its top modified so that other layers will continue to reference it.
- for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
- {
- LayerParameterInfo* layer1 = layersWithSameTop[layerIdx];
- LayerParameterInfo* layer2 = layersWithSameTop[layerIdx + 1];
- if (layer1->top_size() != 1)
- {
- throw armnn::ParseException("Node '" + layer1->name() + "' is an in-place layer but "
- "doesn't have exactly one top.");
- }
- std::string newTop = layer1->name() + "_top";
- layer1->set_top(0, newTop);
- if (layer2->bottom_size() != 1 || layer2->bottom(0) != top)
- {
- throw armnn::ParseException("Node '" + layer2->name() + "' is an in-place layer but "
- " doesn't have exactly one bottom, or it doesn't match its top.");
- }
- layer2->set_bottom(0, newTop);
-
- }
- }
-}
-
-} // anonymous namespace, can't be seen outside this source file
-
-RecordByRecordCaffeParser::RecordByRecordCaffeParser() : CaffeParserImpl()
-{}
-
-armnn::INetworkPtr RecordByRecordCaffeParser::CreateNetworkFromBinaryFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs)
-{
-
- m_InputShapes = inputShapes;
- if (requestedOutputs.size() == 0)
- {
- throw armnn::ParseException("requestedOutputs must have at least one entry");
- }
- m_RequestedOutputs = requestedOutputs;
-
- std::ifstream ifs(graphFile, std::ifstream::in|std::ifstream::binary);
- if (ifs.fail())
- {
- throw armnn::FileNotFoundException("Failed to open graph file '" + std::string(graphFile) + "'");
- }
-
- std::vector<LayerParameterInfo> layerInfo;
- NetParameterInfo netParameterInfo;
- while(true)
- {
- ProtobufFieldInfo fieldInfo = readFieldInfo(ifs);
- if (fieldInfo.eof())
- {
- break;
- }
- switch(fieldInfo.field_type())
- {
- case 0:
- {
- ReadBase128(ifs);
- break;
- }
- case 2:
- {
- // The values of interest from the caffe.proto schema are:
- // optional string name = 1; // consider giving the network a name
- // DEPRECATED. See InputParameter. The input blobs to the network.
- // repeated string input = 3;
- // DEPRECATED. See InputParameter. The shape of the input blobs.
- // repeated BlobShape input_shape = 8;
-
- // 4D input dimensions -- deprecated. Use "input_shape" instead.
- // If specified, for each input blob there should be four
- // values specifying the num, channels, height and width of the input blob.
- // Thus, there should be a total of (4 * #input) numbers.
- // repeated int32 input_dim = 4;
-
- // The layers that make up the net. Each of their configurations, including
- // connectivity and behavior, is specified as a LayerParameter.
- // repeated LayerParameter layer = 100; // ID 100 so layers are printed last.
-
- // The first four will (if present) be read into the NetParameterInfo
- // the LayerParameters will be read into the LayerParameterInfo vector.
-
- int size = ReadBase128(ifs);
- std::streamoff posStartOfData = ifs.tellg();
- ifs.seekg(size, std::ios_base::cur);
- if(!ifs.good())
- {
- throw armnn::ParseException("failed to seek ahead in binary caffe file");
- }
- std::streamoff endOfLayer = ifs.tellg();
- if (fieldInfo.field_id() == 1)
- {
- VarLenDataInfo dataInfo = CreateVarLenDataInfo(posStartOfData, endOfLayer);
- auto graphName = AllocateBuffer(ifs, dataInfo);
- netParameterInfo.set_name(graphName, dataInfo.SizeOfData());
- }
- if (fieldInfo.field_id() == 3)
- {
- VarLenDataInfo dataInfo = CreateVarLenDataInfo(posStartOfData, endOfLayer);
- auto inputName = AllocateBuffer(ifs, dataInfo);
- netParameterInfo.add_input(inputName, dataInfo.SizeOfData());
- }
- if (fieldInfo.field_id() == 8)
- {
- VarLenDataInfo dataInfo = CreateVarLenDataInfo(posStartOfData, endOfLayer);
- auto inputShape = AllocateBuffer(ifs, dataInfo);
- caffe::BlobShape blobShape;
- bool bRet = blobShape.ParseFromArray(inputShape.get(), static_cast<int>(dataInfo.SizeOfData()));
- if (!bRet)
- {
- throw armnn::ParseException("Failed to parse input shape");
- }
- netParameterInfo.add_blob_shape(blobShape);
- }
- if (fieldInfo.field_id() == 4)
- {
- int input_dim = ReadBase128(ifs);
- netParameterInfo.add_input_dimension(input_dim);
- }
- if (fieldInfo.field_id() == 100)
- {
- LayerParameterInfo info(CreateVarLenDataInfo(posStartOfData, endOfLayer));
- ReadTopologicalInfoForLayerParameter(info, ifs);
- layerInfo.push_back(info);
- }
- break;
- }
- default:
- {
- break;
- }
- }
- }
- std::vector<const LayerParameterInfo*> sortedNodes;
- ProcessLayers(netParameterInfo, layerInfo, m_RequestedOutputs, sortedNodes);
- armnn::INetworkPtr networkPtr = LoadLayers(ifs, sortedNodes, netParameterInfo);
- return networkPtr;
-
-}
-
-void RecordByRecordCaffeParser::ProcessLayers(
- const NetParameterInfo& netParameterInfo,
- std::vector<LayerParameterInfo>& layerInfo,
- const std::vector<std::string>& m_RequestedOutputs,
- std::vector<const LayerParameterInfo*>& sortedNodes)
-{
- // if there is an implicit input layer add it to the layerInfo list
- if (netParameterInfo.input_size() > 0)
- {
- LayerParameterInfo implicitInputLayer(0, 0);
- implicitInputLayer.set_type(LayerParameterInfo::INPUT);
- implicitInputLayer.set_name(netParameterInfo.input(0));
- implicitInputLayer.add_top(netParameterInfo.input(0));
- layerInfo.push_back(implicitInputLayer);
- }
- ::ResolveInPlaceLayers(layerInfo);
-
- for (LayerParameterInfo& info : layerInfo)
- {
- for (unsigned long i = 0; i < info.top_size(); ++i)
- {
- m_CaffeLayersByTopName[info.top(i)] = &info;
- }
- }
-
- // Find the output layers the user requested
- std::vector<const LayerParameterInfo*> targetLayers;
- for (const std::string& requestedOutputName : m_RequestedOutputs)
- {
- auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
- if (nodeIt == m_CaffeLayersByTopName.end())
- {
- throw armnn::ParseException(
- "Couldn't find requested output layer '" + requestedOutputName + "' in graph");
- }
- targetLayers.push_back(nodeIt->second);
- }
-
- // Sort them into a linear ordering such that all inputs of a node are before the node itself
- if (!armnnUtils::GraphTopologicalSort<const LayerParameterInfo*>(
- targetLayers,
- [this](const LayerParameterInfo* node)
- {
- return GetInputs(*node);
- },
- sortedNodes))
- {
- throw armnn::ParseException("Cycle detected in graph");
- }
-}
-
-
-std::vector<const LayerParameterInfo*> RecordByRecordCaffeParser::GetInputs(
- const LayerParameterInfo& layerParam)
-{
- std::vector<const LayerParameterInfo*> ret;
- ret.reserve(layerParam.bottom_size());
- for (unsigned long j = 0; j < layerParam.bottom_size(); ++j)
- {
- std::string inputName = layerParam.bottom(j);
- auto inputIt = m_CaffeLayersByTopName.find(inputName);
- if (inputIt == m_CaffeLayersByTopName.end())
- {
- throw armnn::ParseException(
- "Can't find Caffe layer with top called '" + inputName + "', which is listed as an input of '" +
- layerParam.name() + "'");
- }
- ret.push_back(inputIt->second);
- }
-
- return ret;
-}
-
-armnn::INetworkPtr RecordByRecordCaffeParser::LoadLayers(std::ifstream& ifs,
- std::vector<const LayerParameterInfo *>& sortedNodes,
- const NetParameterInfo& netParameterInfo)
-{
-
- m_NetworkInputsBindingInfo.clear();
- m_NetworkOutputsBindingInfo.clear();
-
- m_Network = armnn::INetwork::Create();
-
- for (auto info : sortedNodes)
- {
- caffe::LayerParameter layer;
- if (info->isImplicitInputLayer())
- {
- // create the matching Layer Parameter programatically from the data in the
- // net parameter info which has been passed in...
- layer.set_type(LayerParameterInfo::INPUT);
- layer.set_name(netParameterInfo.input(0));
- layer.add_top(netParameterInfo.input(0));
-
- caffe::InputParameter* inputParam = layer.mutable_input_param();
- caffe::BlobShape* shape = inputParam->add_shape();
-
- long unsigned int dim_size = netParameterInfo.input_dimensions_size();
- for (long unsigned int i = 0; i < dim_size; ++i)
- {
- shape->add_dim(netParameterInfo.input_dimension(i));
- }
- }
- else
- {
- char *buffer = new char[info->SizeOfData()];
- ifs.clear();
- ifs.seekg(info->PositionOfData(), std::ios_base::beg);
- ifs.read(buffer, armnn::numeric_cast<std::streamsize>(info->SizeOfData()));
- bool bRet = layer.ParseFromArray(buffer, static_cast<int>(info->SizeOfData()));
- delete[] buffer;
- if (!bRet)
- {
- throw armnn::ParseException("Failed to parse layer [" + info->name() + "]");
- }
- }
-
- if (info->new_tops())
- {
- //update the tops
- layer.set_top(0, info->top(0));
- }
- if (info->new_bottoms())
- {
- //update the bottoms
- layer.set_bottom(0, info->bottom(0));
- }
-
- auto it = ms_CaffeLayerNameToParsingFunctions.find(layer.type());
- if (it == ms_CaffeLayerNameToParsingFunctions.end())
- {
- throw armnn::ParseException("Unsupported layer type '" + layer.type() + "'");
- }
- auto func = it->second;
- (this->*func)(layer);
- }
- ifs.close();
-
- // Add ArmNN output layers connected to each requested output
- for (const std::string& requestedOutput : m_RequestedOutputs)
- {
- armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
-
- const armnn::LayerBindingId outputId = armnn::numeric_cast<armnn::LayerBindingId>(
- m_NetworkOutputsBindingInfo.size());
- armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
- outputSlot.Connect(outputLayer->GetInputSlot(0));
-
- TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
- }
-
- Cleanup();
-
- return move(m_Network);
-}
diff --git a/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp b/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp
deleted file mode 100644
index aab2fb025b..0000000000
--- a/src/armnnCaffeParser/RecordByRecordCaffeParser.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <string>
-#include <vector>
-#include <iostream>
-
-#include "caffe/proto/caffe.pb.h"
-
-#include "CaffeParser.hpp"
-
-
-
-namespace armnnCaffeParser
-{
-
-class NetParameterInfo;
-class LayerParameterInfo;
-
-
-class RecordByRecordCaffeParser : public ICaffeParser::CaffeParserImpl
-{
-public:
-
- /// Create the network from a protobuf binary file on disk
- virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
- const char* graphFile,
- const std::map<std::string, armnn::TensorShape>& inputShapes,
- const std::vector<std::string>& requestedOutputs) override;
-
- RecordByRecordCaffeParser();
-
-private:
- void ProcessLayers(const NetParameterInfo& netParameterInfo,
- std::vector<LayerParameterInfo>& layerInfo,
- const std::vector<std::string>& m_RequestedOutputs,
- std::vector<const LayerParameterInfo*>& sortedNodes);
- armnn::INetworkPtr LoadLayers(std::ifstream& ifs,
- std::vector<const LayerParameterInfo *>& sortedNodes,
- const NetParameterInfo& netParameterInfo);
- std::vector<const LayerParameterInfo*> GetInputs(
- const LayerParameterInfo& layerParam);
-
- std::map<std::string, const LayerParameterInfo*> m_CaffeLayersByTopName;
- std::vector<std::string> m_RequestedOutputs;
-};
-
-} // namespace armnnCaffeParser
-
diff --git a/src/armnnCaffeParser/test/TestAdd.cpp b/src/armnnCaffeParser/test/TestAdd.cpp
deleted file mode 100644
index ab087cbb89..0000000000
--- a/src/armnnCaffeParser/test/TestAdd.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct AddFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- AddFixture()
- {
- m_Prototext = "name: \"MinimalAdd\"\n"
- "layer {\n"
- " name: \"data\"\n"
- " type: \"Input\"\n"
- " top: \"data\"\n"
- " input_param { shape: { dim: 1 dim: 1 dim: 4 dim: 4 } }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool1\"\n"
- " name: \"pool1\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " kernel_size: 2\n"
- " stride: 2\n"
- " pool: MAX\n"
- " }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool2\"\n"
- " name: \"pool2\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " kernel_size: 2\n"
- " stride: 2\n"
- " pool: MAX\n"
- " }\n"
- "}\n"
- "layer {\n"
- " bottom: \"pool1\"\n"
- " bottom: \"pool2\"\n"
- " top: \"add\"\n"
- " name: \"add\"\n"
- " type: \"Eltwise\"\n"
- "}\n";
- SetupSingleInputSingleOutput("data", "add");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseAdd, AddFixture)
-{
- RunTest<4>(
- {
- 0, 1, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 1, 0,
- 1, 0, 1, 1
- },
- {
- 2, 0,
- 2, 2
- });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestConcat.cpp b/src/armnnCaffeParser/test/TestConcat.cpp
deleted file mode 100644
index 2d952865f4..0000000000
--- a/src/armnnCaffeParser/test/TestConcat.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct ConcatFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- ConcatFixture()
- {
- m_Prototext = "name: \"Concat\"\n"
- "layer {\n"
- " name: \"data\"\n"
- " type: \"Input\"\n"
- " top: \"data\"\n"
- " input_param { shape: { dim: 1 dim: 1 dim: 4 dim: 4 } }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool1\"\n"
- " name: \"pool1\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " kernel_size: 2\n"
- " stride: 2\n"
- " pool: MAX\n"
- " }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool2\"\n"
- " name: \"pool2\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " kernel_size: 2\n"
- " stride: 2\n"
- " pool: MAX\n"
- " }\n"
- "}\n"
- "layer {\n"
- " bottom: \"pool1\"\n"
- " bottom: \"pool2\"\n"
- " top: \"concat\"\n"
- " name: \"concat\"\n"
- " type: \"Concat\"\n"
- "}\n";
- SetupSingleInputSingleOutput("data", "concat");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseConcat, ConcatFixture)
-{
- RunTest<4>(
- {
- 0, 1, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 1, 0,
- 1, 0, 1, 1
- },
- {
- 1, 0,
- 1, 1,
-
- 1, 0,
- 1, 1
- });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestConvolution.cpp b/src/armnnCaffeParser/test/TestConvolution.cpp
deleted file mode 100644
index b881f1f58f..0000000000
--- a/src/armnnCaffeParser/test/TestConvolution.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-#include <sstream>
-#include <initializer_list>
-
-namespace
-{
-
-template <typename T>
-std::string TaggedSequence(const std::string & tag, const std::initializer_list<T> & data)
-{
- bool first = true;
- std::stringstream ss;
- for (auto && d : data)
- {
- if (!first)
- {
- ss << " , ";
- }
- else
- {
- first = false;
- }
- ss << " " << tag << " : " << d << " ";
- }
- return ss.str();
-}
-
-template <typename T>
-std::string TaggedSequence(const std::string & tag, T data, unsigned int n)
-{
- std::stringstream ss;
- for (unsigned int i=0; i<n; ++i)
- {
- if (i>0)
- {
- ss << " , ";
- }
- ss << " " << tag << " : " << data << " ";
- }
- return ss.str();
-}
-
-} // namespace <anonymous>
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct ConvolutionFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- ConvolutionFixture(const std::initializer_list<unsigned int> & inputDims,
- const std::initializer_list<float> & filterData,
- unsigned int kernelSize,
- unsigned int numOutput=1,
- unsigned int group=1)
- {
- m_Prototext = R"(
- name: "ConvolutionTest"
- layer {
- name: "input1"
- type: "Input"
- top: "input1"
- input_param { shape: { )" + TaggedSequence("dim", inputDims) + R"( } }
- }
- layer {
- name: "conv1"
- type: "Convolution"
- bottom: "input1"
- top: "conv1"
- blobs: { )" + TaggedSequence("data", filterData) + R"( }
- blobs: { )" + TaggedSequence("data", 0, numOutput) + R"( }
- convolution_param {
- num_output: )" + std::to_string(numOutput) + R"(
- pad: 0
- kernel_size: )" + std::to_string(kernelSize) + R"(
- stride: 1
- group: )" + std::to_string(group) + R"(
- }
- }
- )";
- SetupSingleInputSingleOutput("input1", "conv1");
- }
-};
-
-struct SimpleConvolutionFixture : public ConvolutionFixture
-{
- SimpleConvolutionFixture()
- : ConvolutionFixture( {1, 1, 2, 2}, {1.0f, 1.0f, 1.0f, 1.0f}, 2)
- {
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(SimpleConvolution, SimpleConvolutionFixture)
-{
- RunTest<4>({ 1, 3, 5, 7 }, { 16 });
-}
-
-struct GroupConvolutionFixture : public ConvolutionFixture
-{
- GroupConvolutionFixture()
- : ConvolutionFixture(
- {1, 2, 2, 2},
- {
- 1.0f, 1.0f, 1.0f, 1.0f, // filter for channel #0
- 2.0f, 2.0f, 2.0f, 2.0f // filter for channel #1
- },
- 2, // kernel size is 2x2
- 2, // number of output channels is 2
- 2) // number of groups (separate filters)
- {
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(GroupConvolution, GroupConvolutionFixture)
-{
- RunTest<4>(
- {
- 1, 2, 3, 4, // input channel #0
- 5, 6, 7, 8, // input channel #1
- },
- {
- 10, // convolution result for channel #0 applying filter #0
- 52 // same for channel #1 and filter #1
- }
- );
-}
-
-
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
diff --git a/src/armnnCaffeParser/test/TestDropout.cpp b/src/armnnCaffeParser/test/TestDropout.cpp
deleted file mode 100644
index 503766248b..0000000000
--- a/src/armnnCaffeParser/test/TestDropout.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct DropoutFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- DropoutFixture()
- {
- m_Prototext = "name: \"DropoutFixture\"\n"
- "layer {\n"
- " name: \"data\"\n"
- " type: \"Input\"\n"
- " top: \"data\"\n"
- " input_param { shape: { dim: 1 dim: 1 dim: 2 dim: 2 } }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"drop1\"\n"
- " name: \"drop1\"\n"
- " type: \"Dropout\"\n"
- "}\n"
- "layer {\n"
- " bottom: \"drop1\"\n"
- " bottom: \"drop1\"\n"
- " top: \"add\"\n"
- " name: \"add\"\n"
- " type: \"Eltwise\"\n"
- "}\n";
- SetupSingleInputSingleOutput("data", "add");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseDropout, DropoutFixture)
-{
- RunTest<4>(
- {
- 1, 2,
- 3, 4,
- },
- {
- 2, 4,
- 6, 8
- });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestInPlace.cpp b/src/armnnCaffeParser/test/TestInPlace.cpp
deleted file mode 100644
index 2495e2182e..0000000000
--- a/src/armnnCaffeParser/test/TestInPlace.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-// The pooling layer should take its input from the relu, not the add directly.
-struct InPlaceFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- InPlaceFixture()
- {
- m_Prototext = R"(
-name: "InPlace"
-layer {
- name: "data"
- type: "Input"
- top: "data"
- input_param { shape: { dim: 1 dim: 1 dim: 1 dim: 1 } }
-}
-layer {
- bottom: "data"
- bottom: "data"
- top: "add"
- name: "add"
- type: "Eltwise"
-}
-layer {
- name: "relu"
- type: "ReLU"
- bottom: "add"
- top: "relu"
- phase: TEST
-}
-layer {
- name: "pool"
- type: "Pooling"
- bottom: "relu"
- top: "pool"
- phase: TEST
- pooling_param {
- pool: MAX
- kernel_size: 1
- stride: 1
- }
-}
- )";
- SetupSingleInputSingleOutput("data", "pool");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseInPlace, InPlaceFixture)
-{
- RunTest<1>({ -1.0f }, { 0.0f });
-}
-
-// The requested output of the network is a layer which has an activation attached.
-// The output of the network should therefore actually be the activation layer.
-struct InPlaceOutputFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- InPlaceOutputFixture()
- {
- m_Prototext = R"(
-name: "InPlace"
-layer {
- name: "data"
- type: "Input"
- top: "data"
- input_param { shape: { dim: 1 dim: 1 dim: 1 dim: 1 } }
-}
-layer {
- bottom: "data"
- bottom: "data"
- top: "add"
- name: "add"
- type: "Eltwise"
-}
-layer {
- name: "relu"
- type: "ReLU"
- bottom: "add"
- top: "add"
- phase: TEST
-}
- )";
- SetupSingleInputSingleOutput("data", "add");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(InPlaceOutput, InPlaceOutputFixture)
-{
- RunTest<1>({ -1.0f }, { 0.0f });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestInputs.cpp b/src/armnnCaffeParser/test/TestInputs.cpp
deleted file mode 100644
index 96d8e2b8af..0000000000
--- a/src/armnnCaffeParser/test/TestInputs.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "armnn/IRuntime.hpp"
-#include "armnn/INetwork.hpp"
-#include "armnn/Exceptions.hpp"
-
-#include "test/TensorHelpers.hpp"
-
-#include <string>
-
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-
-BOOST_AUTO_TEST_CASE(InputShapes)
-{
- std::string explicitInput = "name: \"Minimal\"\n"
- "layer {\n"
- " name: \"data\"\n"
- " type: \"Input\"\n"
- " top: \"data\"\n"
- " input_param { shape: { dim: 1 dim: 2 dim: 3 dim: 4 } }\n"
- "}";
- std::string implicitInput = "name: \"Minimal\"\n"
- "input: \"data\" \n"
- "input_dim: 1 \n"
- "input_dim: 2 \n"
- "input_dim: 3 \n"
- "input_dim: 4 \n";
- std::string implicitInputNoShape = "name: \"Minimal\"\n"
- "input: \"data\" \n";
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnnCaffeParser::ICaffeParserPtr parser(armnnCaffeParser::ICaffeParser::Create());
- armnn::INetworkPtr network(nullptr, nullptr);
- armnn::NetworkId netId;
-
- // Check everything works normally
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- {
- network = parser->CreateNetworkFromString(explicitInput.c_str(), {}, { "data" });
- BOOST_TEST(network.get());
- runtime->LoadNetwork(netId, Optimize(*network, backends, runtime->GetDeviceSpec()));
-
- armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data");
- armnn::TensorInfo inputTensorInfo = inputBindingInfo.second;
- BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first)));
-
- BOOST_TEST(inputTensorInfo.GetShape()[0] == 1);
- BOOST_TEST(inputTensorInfo.GetShape()[1] == 2);
- BOOST_TEST(inputTensorInfo.GetShape()[2] == 3);
- BOOST_TEST(inputTensorInfo.GetShape()[3] == 4);
- }
-
- // Checks everything works with implicit input.
- {
- network = parser->CreateNetworkFromString(implicitInput.c_str(), {}, { "data" });
- BOOST_TEST(network.get());
- runtime->LoadNetwork(netId, Optimize(*network, backends, runtime->GetDeviceSpec()));
-
- armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data");
- armnn::TensorInfo inputTensorInfo = inputBindingInfo.second;
- BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first)));
-
- BOOST_TEST(inputTensorInfo.GetShape()[0] == 1);
- BOOST_TEST(inputTensorInfo.GetShape()[1] == 2);
- BOOST_TEST(inputTensorInfo.GetShape()[2] == 3);
- BOOST_TEST(inputTensorInfo.GetShape()[3] == 4);
- }
-
- // Checks everything works with implicit and passing shape.
- {
- network = parser->CreateNetworkFromString(implicitInput.c_str(), { {"data", { 2, 2, 3, 4 } } }, { "data" });
- BOOST_TEST(network.get());
- runtime->LoadNetwork(netId, Optimize(*network, backends, runtime->GetDeviceSpec()));
-
- armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data");
- armnn::TensorInfo inputTensorInfo = inputBindingInfo.second;
- BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first)));
-
- BOOST_TEST(inputTensorInfo.GetShape()[0] == 2);
- BOOST_TEST(inputTensorInfo.GetShape()[1] == 2);
- BOOST_TEST(inputTensorInfo.GetShape()[2] == 3);
- BOOST_TEST(inputTensorInfo.GetShape()[3] == 4);
- }
-
- // Checks everything works with implicit (no shape) and passing shape.
- {
- network = parser->CreateNetworkFromString(implicitInputNoShape.c_str(), {{"data", {2, 2, 3, 4} }}, { "data" });
- BOOST_TEST(network.get());
- runtime->LoadNetwork(netId, Optimize(*network, backends, runtime->GetDeviceSpec()));
-
- armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data");
- armnn::TensorInfo inputTensorInfo = inputBindingInfo.second;
- BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first)));
-
- BOOST_TEST(inputTensorInfo.GetShape()[0] == 2);
- BOOST_TEST(inputTensorInfo.GetShape()[1] == 2);
- BOOST_TEST(inputTensorInfo.GetShape()[2] == 3);
- BOOST_TEST(inputTensorInfo.GetShape()[3] == 4);
- }
-
- // Checks exception on incompatible shapes.
- {
- BOOST_CHECK_THROW(parser->CreateNetworkFromString(implicitInput.c_str(), {{"data",{ 2, 2, 3, 2 }}}, {"data"}),
- armnn::ParseException);
- }
-
- // Checks exception when no shape available.
- {
- BOOST_CHECK_THROW(parser->CreateNetworkFromString(implicitInputNoShape.c_str(), {}, { "data" }),
- armnn::ParseException);
- }
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestMul.cpp b/src/armnnCaffeParser/test/TestMul.cpp
deleted file mode 100644
index 3b49015bce..0000000000
--- a/src/armnnCaffeParser/test/TestMul.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct MulFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- MulFixture()
- {
- m_Prototext = "name: \"MinimalMul\"\n"
- "layer {\n"
- " name: \"data\"\n"
- " type: \"Input\"\n"
- " top: \"data\"\n"
- " input_param { shape: { dim: 1 dim: 1 dim: 4 dim: 4 } }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool1\"\n"
- " name: \"pool1\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " kernel_size: 2\n"
- " stride: 2\n"
- " pool: MAX\n"
- " }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool2\"\n"
- " name: \"pool2\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " kernel_size: 2\n"
- " stride: 2\n"
- " pool: MAX\n"
- " }\n"
- "}\n"
- "layer {\n"
- " bottom: \"pool1\"\n"
- " bottom: \"pool2\"\n"
- " top: \"mul\"\n"
- " name: \"mul\"\n"
- " type: \"Eltwise\"\n"
- " eltwise_param {\n"
- " operation: 0\n"
- " }\n"
- "}\n";
- SetupSingleInputSingleOutput("data", "mul");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(ParseMul, MulFixture)
-{
- RunTest<4>(
- {
- 0, 1, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 1, 0,
- 1, 0, 1, 1
- },
- {
- 1, 0,
- 1, 1
- });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp b/src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp
deleted file mode 100644
index 82b75f400e..0000000000
--- a/src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct MultiInputsOutputsFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- MultiInputsOutputsFixture()
- {
- m_Prototext = R"(
-name: "MultiInputsOutputs"
-layer {
- name: "input1"
- type: "Input"
- top: "input1"
- input_param { shape: { dim: 1 } }
-}
-layer {
- name: "input2"
- type: "Input"
- top: "input2"
- input_param { shape: { dim: 1 } }
-}
-layer {
- bottom: "input1"
- bottom: "input2"
- top: "add1"
- name: "add1"
- type: "Eltwise"
-}
-layer {
- bottom: "input2"
- bottom: "input1"
- top: "add2"
- name: "add2"
- type: "Eltwise"
-}
- )";
- Setup({ }, { "add1", "add2" });
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(MultiInputsOutputs, MultiInputsOutputsFixture)
-{
- RunTest<1>({ { "input1",{ 12.0f } },{ "input2",{ 13.0f } } },
- { { "add1",{ 25.0f } },{ "add2",{ 25.0f } } });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestPooling2d.cpp b/src/armnnCaffeParser/test/TestPooling2d.cpp
deleted file mode 100644
index 55517a0695..0000000000
--- a/src/armnnCaffeParser/test/TestPooling2d.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct GlobalPoolingFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- GlobalPoolingFixture()
- {
- m_Prototext = "name: \"GlobalPooling\"\n"
- "layer {\n"
- " name: \"data\"\n"
- " type: \"Input\"\n"
- " top: \"data\"\n"
- " input_param { shape: { dim: 1 dim: 3 dim: 2 dim: 2 } }\n"
- "}\n"
- "layer {\n"
- " bottom: \"data\"\n"
- " top: \"pool1\"\n"
- " name: \"pool1\"\n"
- " type: \"Pooling\"\n"
- " pooling_param {\n"
- " pool: AVE\n"
- " global_pooling: true\n"
- " }\n"
- "}\n";
- SetupSingleInputSingleOutput("data", "pool1");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(GlobalPooling, GlobalPoolingFixture)
-{
- RunTest<4>(
- {
- 1,3,
- 5,7,
-
- 2,2,
- 2,2,
-
- 4,4,
- 6,6
- },
- {
- 4, 2, 5
- });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnCaffeParser/test/TestSplit.cpp b/src/armnnCaffeParser/test/TestSplit.cpp
deleted file mode 100644
index 048da424cf..0000000000
--- a/src/armnnCaffeParser/test/TestSplit.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include <boost/test/unit_test.hpp>
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "ParserPrototxtFixture.hpp"
-
-BOOST_AUTO_TEST_SUITE(CaffeParser)
-
-struct SplitFixture : public armnnUtils::ParserPrototxtFixture<armnnCaffeParser::ICaffeParser>
-{
- SplitFixture()
- {
- m_Prototext = R"(
-name: "Split"
-layer {
- name: "data"
- type: "Input"
- top: "data"
- input_param { shape: { dim: 1 dim: 1 dim: 1 dim: 1 } }
-}
-layer {
- name: "split"
- type: "Split"
- bottom: "data"
- top: "split_top0"
- top: "split_top1"
-}
-layer {
- bottom: "split_top0"
- bottom: "split_top1"
- top: "add"
- name: "add"
- type: "Eltwise"
-}
- )";
- SetupSingleInputSingleOutput("data", "add");
- }
-};
-
-BOOST_FIXTURE_TEST_CASE(Split, SplitFixture)
-{
- RunTest<1>({ 1.0f }, { 2.0f });
-}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnConverter/ArmnnConverter.cpp b/src/armnnConverter/ArmnnConverter.cpp
index b5d3256c3c..6f05325bba 100644
--- a/src/armnnConverter/ArmnnConverter.cpp
+++ b/src/armnnConverter/ArmnnConverter.cpp
@@ -4,9 +4,6 @@
//
#include <armnn/Logging.hpp>
-#if defined(ARMNN_CAFFE_PARSER)
-#include <armnnCaffeParser/ICaffeParser.hpp>
-#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif
@@ -81,9 +78,6 @@ int ParseCommandLineArgs(int argc, char* argv[],
try
{
std::string modelFormatDescription("Format of the model file");
-#if defined(ARMNN_CAFFE_PARSER)
- modelFormatDescription += ", caffe-binary, caffe-text";
-#endif
#if defined(ARMNN_ONNX_PARSER)
modelFormatDescription += ", onnx-binary, onnx-text";
#endif
@@ -336,11 +330,10 @@ private:
int main(int argc, char* argv[])
{
-#if (!defined(ARMNN_CAFFE_PARSER) \
- && !defined(ARMNN_ONNX_PARSER) \
+#if (!defined(ARMNN_ONNX_PARSER) \
&& !defined(ARMNN_TF_PARSER) \
&& !defined(ARMNN_TF_LITE_PARSER))
- ARMNN_LOG(fatal) << "Not built with any of the supported parsers, Caffe, Onnx, Tensorflow, or TfLite.";
+ ARMNN_LOG(fatal) << "Not built with any of the supported parsers Onnx, Tensorflow, or TfLite.";
return EXIT_FAILURE;
#endif
@@ -399,20 +392,7 @@ int main(int argc, char* argv[])
try
{
- if (modelFormat.find("caffe") != std::string::npos)
- {
-#if defined(ARMNN_CAFFE_PARSER)
- if (!converter.CreateNetwork<armnnCaffeParser::ICaffeParser>())
- {
- ARMNN_LOG(fatal) << "Failed to load model from file";
- return EXIT_FAILURE;
- }
-#else
- ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
- return EXIT_FAILURE;
-#endif
- }
- else if (modelFormat.find("onnx") != std::string::npos)
+ if (modelFormat.find("onnx") != std::string::npos)
{
#if defined(ARMNN_ONNX_PARSER)
if (!converter.CreateNetwork<armnnOnnxParser::IOnnxParser>())
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 135f6497be..308b885230 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -16,70 +16,6 @@ target_include_directories(inferenceTest PRIVATE ../src/armnnUtils)
target_include_directories(inferenceTest PRIVATE ../src/backends)
target_include_directories(inferenceTest PRIVATE ../third-party/stb)
-if(BUILD_CAFFE_PARSER)
- macro(CaffeParserTest testName sources)
- add_executable_ex(${testName} ${sources})
- target_include_directories(${testName} PRIVATE ../src/armnnUtils)
- target_include_directories(${testName} PRIVATE ../src/backends)
- set_target_properties(${testName} PROPERTIES COMPILE_FLAGS "${CAFFE_PARSER_TEST_ADDITIONAL_COMPILE_FLAGS}")
-
- target_link_libraries(${testName} inferenceTest)
- target_link_libraries(${testName} armnnCaffeParser)
- target_link_libraries(${testName} armnn)
- target_link_libraries(${testName} ${CMAKE_THREAD_LIBS_INIT})
- addDllCopyCommands(${testName})
- endmacro()
-
- set(CaffeCifar10AcrossChannels-Armnn_sources
- CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp
- Cifar10Database.hpp
- Cifar10Database.cpp)
- CaffeParserTest(CaffeCifar10AcrossChannels-Armnn "${CaffeCifar10AcrossChannels-Armnn_sources}")
-
- set(CaffeMnist-Armnn_sources
- CaffeMnist-Armnn/CaffeMnist-Armnn.cpp
- MnistDatabase.hpp
- MnistDatabase.cpp)
- CaffeParserTest(CaffeMnist-Armnn "${CaffeMnist-Armnn_sources}")
-
- set(CaffeAlexNet-Armnn_sources
- CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp
- CaffePreprocessor.hpp
- CaffePreprocessor.cpp)
- CaffeParserTest(CaffeAlexNet-Armnn "${CaffeAlexNet-Armnn_sources}")
-
- set(MultipleNetworksCifar10_SRC
- MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
- Cifar10Database.hpp
- Cifar10Database.cpp)
- CaffeParserTest(MultipleNetworksCifar10 "${MultipleNetworksCifar10_SRC}")
-
- set(CaffeResNet-Armnn_sources
- CaffeResNet-Armnn/CaffeResNet-Armnn.cpp
- CaffePreprocessor.hpp
- CaffePreprocessor.cpp)
- CaffeParserTest(CaffeResNet-Armnn "${CaffeResNet-Armnn_sources}")
-
- set(CaffeVGG-Armnn_sources
- CaffeVGG-Armnn/CaffeVGG-Armnn.cpp
- CaffePreprocessor.hpp
- CaffePreprocessor.cpp)
- CaffeParserTest(CaffeVGG-Armnn "${CaffeVGG-Armnn_sources}")
-
- set(CaffeInception_BN-Armnn_sources
- CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp
- CaffePreprocessor.hpp
- CaffePreprocessor.cpp)
- CaffeParserTest(CaffeInception_BN-Armnn "${CaffeInception_BN-Armnn_sources}")
-
- set(CaffeYolo-Armnn_sources
- CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
- YoloDatabase.hpp
- YoloDatabase.cpp
- YoloInferenceTest.hpp)
- CaffeParserTest(CaffeYolo-Armnn "${CaffeYolo-Armnn_sources}")
-endif()
-
if(BUILD_TF_PARSER)
macro(TfParserTest testName sources)
add_executable_ex(${testName} ${sources})
@@ -119,8 +55,8 @@ if(BUILD_TF_PARSER)
set(TfResNext-Armnn_sources
TfResNext_Quantized-Armnn/TfResNext_Quantized-Armnn.cpp
- CaffePreprocessor.hpp
- CaffePreprocessor.cpp)
+ ImagePreprocessor.hpp
+ ImagePreprocessor.cpp)
TfParserTest(TfResNext-Armnn "${TfResNext-Armnn_sources}")
endif()
@@ -238,7 +174,7 @@ if (BUILD_ONNX_PARSER)
OnnxParserTest(OnnxMobileNet-Armnn "${OnnxMobileNet-Armnn_sources}")
endif()
-if (BUILD_ARMNN_SERIALIZER OR BUILD_CAFFE_PARSER OR BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD_ONNX_PARSER)
+if (BUILD_ARMNN_SERIALIZER OR BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD_ONNX_PARSER)
set(ExecuteNetwork_sources
ExecuteNetwork/ExecuteNetwork.cpp
ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -257,9 +193,6 @@ if (BUILD_ARMNN_SERIALIZER OR BUILD_CAFFE_PARSER OR BUILD_TF_PARSER OR BUILD_TF_
if (BUILD_ARMNN_SERIALIZER)
target_link_libraries(ExecuteNetwork armnnSerializer)
endif()
- if (BUILD_CAFFE_PARSER)
- target_link_libraries(ExecuteNetwork armnnCaffeParser)
- endif()
if (BUILD_TF_PARSER)
target_link_libraries(ExecuteNetwork armnnTfParser)
endif()
@@ -285,9 +218,6 @@ if(BUILD_ACCURACY_TOOL)
if (BUILD_ARMNN_SERIALIZER)
target_link_libraries(${executorName} armnnSerializer)
endif()
- if (BUILD_CAFFE_PARSER)
- target_link_libraries(${executorName} armnnCaffeParser)
- endif()
if (BUILD_TF_PARSER)
target_link_libraries(${executorName} armnnTfParser)
endif()
diff --git a/tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp b/tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp
deleted file mode 100644
index e14cd61e72..0000000000
--- a/tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../CaffePreprocessor.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- int retVal = EXIT_FAILURE;
- try
- {
- using DataType = float;
- using DatabaseType = CaffePreprocessor;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
- retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "bvlc_alexnet_1.caffemodel", true, "data", "prob", { 0 },
- [](const char* dataDir, const ModelType &) {
- return DatabaseType(dataDir);
- });
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeAlexNet-Armnn: An error has occurred when running the "
- "classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/CaffeAlexNet-Armnn/Validation.txt b/tests/CaffeAlexNet-Armnn/Validation.txt
deleted file mode 100644
index cb95f050e2..0000000000
--- a/tests/CaffeAlexNet-Armnn/Validation.txt
+++ /dev/null
@@ -1,1000 +0,0 @@
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
diff --git a/tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp b/tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp
deleted file mode 100644
index 7bd98f69da..0000000000
--- a/tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../Cifar10Database.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- int retVal = EXIT_FAILURE;
- try
- {
- using DataType = float;
- using DatabaseType = Cifar10Database;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
- retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "cifar10_full_iter_60000.caffemodel", true, "data", "prob",
- { 0, 1, 2, 4, 7 },
- [](const char* dataDir, const ModelType&) {
- return DatabaseType(dataDir);
- });
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeCifar10AcrossChannels-Armnn: An error has occurred when running "
- "the classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt b/tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt
deleted file mode 100644
index a7b59465eb..0000000000
--- a/tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt
+++ /dev/null
@@ -1,1000 +0,0 @@
-3
-8
-8
-8
-6
-8
-5
-6
-3
-8
-0
-9
-5
-7
-9
-8
-5
-7
-8
-6
-7
-0
-8
-9
-4
-3
-3
-0
-9
-6
-6
-5
-8
-3
-9
-3
-7
-9
-9
-5
-0
-6
-7
-3
-0
-9
-3
-8
-7
-2
-9
-8
-5
-5
-8
-8
-7
-5
-5
-3
-7
-5
-2
-3
-6
-7
-8
-0
-3
-7
-0
-3
-8
-8
-0
-2
-0
-8
-5
-8
-8
-0
-1
-7
-3
-0
-3
-3
-8
-9
-0
-2
-8
-6
-7
-3
-6
-0
-0
-7
-8
-5
-6
-3
-1
-1
-3
-6
-8
-7
-5
-0
-2
-3
-0
-3
-0
-3
-7
-5
-8
-0
-1
-2
-8
-8
-8
-3
-6
-0
-4
-1
-8
-9
-1
-0
-9
-4
-2
-8
-3
-5
-6
-5
-8
-0
-6
-5
-5
-5
-8
-9
-5
-0
-0
-5
-0
-9
-5
-4
-0
-0
-0
-6
-0
-0
-8
-8
-5
-8
-9
-0
-8
-8
-9
-9
-3
-7
-5
-0
-0
-5
-2
-8
-0
-8
-5
-3
-3
-8
-5
-8
-0
-1
-7
-3
-8
-8
-7
-8
-5
-0
-8
-0
-1
-3
-8
-5
-7
-8
-7
-0
-5
-8
-8
-0
-7
-9
-8
-2
-7
-5
-8
-5
-5
-9
-8
-0
-3
-6
-5
-1
-7
-8
-8
-0
-4
-0
-5
-3
-1
-1
-8
-3
-0
-8
-1
-8
-2
-0
-5
-5
-9
-9
-2
-8
-3
-0
-8
-9
-8
-8
-3
-3
-0
-8
-8
-4
-7
-0
-0
-3
-6
-3
-8
-0
-0
-3
-2
-5
-9
-0
-6
-1
-0
-9
-8
-8
-7
-9
-8
-2
-6
-9
-3
-0
-6
-0
-0
-6
-6
-3
-3
-8
-8
-8
-8
-3
-1
-0
-8
-6
-0
-0
-8
-0
-7
-7
-5
-5
-3
-3
-2
-0
-5
-0
-7
-7
-3
-6
-1
-9
-3
-6
-6
-9
-3
-8
-0
-7
-0
-6
-2
-5
-8
-5
-7
-6
-8
-9
-9
-1
-8
-2
-3
-7
-5
-2
-8
-0
-9
-5
-8
-8
-9
-4
-0
-5
-8
-0
-0
-7
-9
-3
-2
-7
-3
-7
-8
-6
-6
-9
-0
-8
-5
-0
-7
-3
-5
-5
-1
-2
-6
-2
-3
-6
-2
-3
-0
-8
-9
-8
-7
-8
-8
-4
-0
-8
-8
-3
-5
-8
-3
-8
-1
-9
-0
-5
-5
-7
-4
-7
-8
-0
-0
-9
-3
-7
-0
-6
-3
-3
-8
-7
-3
-7
-8
-5
-3
-8
-1
-3
-9
-8
-8
-7
-3
-0
-0
-0
-2
-9
-7
-0
-8
-3
-4
-5
-3
-8
-5
-6
-8
-7
-3
-8
-4
-3
-7
-8
-5
-7
-8
-8
-3
-7
-4
-0
-5
-4
-3
-6
-0
-8
-5
-8
-9
-9
-8
-0
-0
-0
-0
-1
-8
-8
-0
-5
-2
-0
-4
-0
-5
-2
-9
-4
-7
-9
-0
-4
-5
-6
-8
-9
-5
-5
-8
-9
-3
-8
-5
-7
-0
-7
-0
-5
-0
-0
-0
-6
-8
-8
-9
-5
-6
-3
-6
-3
-9
-8
-1
-7
-0
-7
-5
-9
-0
-6
-5
-5
-3
-3
-8
-3
-9
-8
-6
-4
-3
-2
-0
-7
-6
-0
-2
-3
-9
-5
-8
-0
-6
-7
-8
-3
-6
-8
-8
-8
-7
-5
-4
-0
-8
-4
-0
-8
-3
-5
-8
-9
-6
-9
-2
-3
-0
-0
-7
-8
-8
-3
-8
-5
-0
-2
-1
-6
-3
-4
-3
-9
-6
-9
-8
-8
-5
-8
-6
-3
-2
-1
-7
-7
-1
-2
-7
-9
-9
-4
-4
-0
-8
-3
-2
-8
-7
-0
-8
-3
-0
-3
-3
-8
-0
-7
-9
-1
-8
-0
-4
-5
-3
-9
-3
-0
-8
-0
-1
-5
-4
-1
-8
-0
-7
-6
-3
-0
-9
-0
-8
-2
-6
-3
-2
-3
-0
-0
-3
-8
-0
-3
-9
-6
-8
-0
-9
-2
-8
-2
-3
-0
-3
-2
-2
-7
-8
-3
-8
-0
-7
-5
-7
-0
-4
-8
-7
-4
-8
-3
-8
-8
-6
-0
-8
-7
-4
-3
-3
-8
-4
-8
-7
-8
-8
-9
-8
-8
-1
-3
-3
-5
-5
-0
-7
-9
-8
-0
-8
-4
-1
-3
-5
-7
-8
-7
-8
-7
-4
-6
-2
-5
-8
-0
-8
-1
-2
-0
-6
-8
-2
-1
-3
-5
-6
-0
-1
-2
-0
-8
-3
-0
-5
-0
-6
-8
-0
-2
-7
-6
-0
-6
-9
-1
-7
-8
-7
-0
-3
-9
-7
-8
-0
-0
-3
-3
-7
-5
-4
-8
-8
-8
-7
-1
-2
-7
-4
-4
-8
-4
-7
-7
-3
-2
-7
-2
-0
-8
-8
-5
-8
-0
-8
-2
-0
-8
-7
-5
-0
-8
-5
-0
-0
-8
-2
-2
-2
-8
-9
-2
-7
-2
-7
-0
-7
-2
-1
-0
-0
-0
-8
-4
-7
-9
-8
-0
-0
-7
-7
-0
-7
-8
-4
-4
-3
-5
-0
-1
-3
-7
-0
-1
-8
-1
-4
-2
-3
-8
-4
-5
-0
-7
-8
-8
-3
-0
-8
-8
-8
-8
-8
-4
-3
-6
-7
-3
-1
-8
-3
-7
-7
-5
-5
-6
-6
-5
-8
-8
-1
-6
-8
-8
-3
-3
-3
-2
-0
-1
-8
-8
-8
-0
-0
-9
-9
-3
-3
-5
-8
-3
-0
-0
-4
-2
-3
-3
-7
-3
-0
-5
-8
-8
-9
-8
-5
-4
-8
-3
-0
-8
-7
-8
-3
-9
-2
-8
-4
-7
-8
-3
-7
-8
-8
-8
-8
-3
-6
-3
-3
-8
-1
-9
-9
-4
-6
-8
-0
-0
-0
-8
-8
-9
-2
-8
-8
-8
-7
-8
-3
-1
-7
-0
-1
-5
-8
-3
-3
-3
-8
-9
-3
-8
diff --git a/tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp b/tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp
deleted file mode 100644
index c41d0443a9..0000000000
--- a/tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../CaffePreprocessor.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- int retVal = EXIT_FAILURE;
- try
- {
- // Coverity fix: The following code may throw an exception of type std::length_error.
- std::vector<ImageSet> imageSet =
- {
- {"shark.jpg", 3694}
- };
-
- using DataType = float;
- using DatabaseType = CaffePreprocessor;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
- retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "Inception-BN-batchsize1.caffemodel", true,
- "data", "softmax", { 0 },
- [&imageSet](const char* dataDir, const ModelType&) {
- return DatabaseType(dataDir, 224, 224, imageSet);
- });
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeInception_BN-Armnn: An error has occurred when running "
- "the classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/CaffeInception_BN-Armnn/Validation.txt b/tests/CaffeInception_BN-Armnn/Validation.txt
deleted file mode 100644
index f6040137d8..0000000000
--- a/tests/CaffeInception_BN-Armnn/Validation.txt
+++ /dev/null
@@ -1,1000 +0,0 @@
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
-3694
diff --git a/tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp b/tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp
deleted file mode 100644
index c79cb78703..0000000000
--- a/tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../MnistDatabase.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- int retVal = EXIT_FAILURE;
- try
- {
- using DataType = float;
- using DatabaseType = MnistDatabase;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
- retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "lenet_iter_9000.caffemodel", true, "data", "prob",
- { 0, 1, 5, 8, 9 },
- [](const char* dataDir, const ModelType&) {
- return DatabaseType(dataDir);
- });
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeMnist-Armnn: An error has occurred when running "
- "the classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/CaffeMnist-Armnn/Validation.txt b/tests/CaffeMnist-Armnn/Validation.txt
deleted file mode 100644
index 63cbca6c56..0000000000
--- a/tests/CaffeMnist-Armnn/Validation.txt
+++ /dev/null
@@ -1,1000 +0,0 @@
-7
-2
-1
-0
-4
-1
-4
-9
-5
-9
-0
-6
-9
-0
-1
-5
-9
-7
-3
-4
-9
-6
-6
-5
-4
-0
-7
-4
-0
-1
-3
-1
-3
-4
-7
-2
-7
-1
-2
-1
-1
-7
-4
-2
-3
-5
-1
-2
-4
-4
-6
-3
-5
-5
-6
-0
-4
-1
-9
-5
-7
-8
-9
-3
-7
-4
-6
-4
-3
-0
-7
-0
-2
-9
-1
-7
-3
-2
-9
-7
-7
-6
-2
-7
-8
-4
-7
-3
-6
-1
-3
-6
-9
-3
-1
-4
-1
-7
-6
-9
-6
-0
-5
-4
-9
-9
-2
-1
-9
-4
-8
-7
-3
-9
-7
-9
-4
-4
-9
-2
-5
-4
-7
-6
-7
-9
-0
-5
-8
-5
-6
-6
-5
-7
-8
-1
-0
-1
-6
-4
-6
-7
-3
-1
-7
-1
-8
-2
-0
-2
-9
-9
-5
-5
-1
-5
-6
-0
-3
-4
-4
-6
-5
-4
-6
-5
-4
-5
-1
-4
-4
-7
-2
-3
-2
-7
-1
-8
-1
-8
-1
-8
-5
-0
-8
-9
-2
-5
-0
-1
-1
-1
-0
-9
-0
-3
-1
-6
-4
-2
-3
-6
-1
-1
-1
-3
-9
-5
-2
-9
-4
-5
-9
-3
-9
-0
-3
-6
-5
-5
-7
-2
-2
-7
-1
-2
-8
-4
-1
-7
-3
-3
-8
-8
-7
-9
-2
-2
-4
-1
-5
-9
-8
-7
-2
-3
-0
-4
-4
-2
-4
-1
-9
-5
-7
-7
-2
-8
-2
-6
-8
-5
-7
-7
-9
-1
-8
-1
-8
-0
-3
-0
-1
-9
-9
-4
-1
-8
-2
-1
-2
-9
-7
-5
-9
-2
-6
-4
-1
-5
-8
-2
-9
-2
-0
-4
-0
-0
-2
-8
-4
-7
-1
-2
-4
-0
-2
-7
-4
-3
-3
-0
-0
-3
-1
-9
-6
-5
-2
-5
-9
-2
-9
-3
-0
-4
-2
-0
-7
-1
-1
-2
-1
-5
-3
-3
-9
-7
-8
-6
-3
-6
-1
-3
-8
-1
-0
-5
-1
-3
-1
-5
-5
-6
-1
-8
-5
-1
-7
-9
-4
-6
-2
-2
-5
-0
-6
-5
-6
-3
-7
-2
-0
-8
-8
-5
-4
-1
-1
-4
-0
-3
-3
-7
-6
-1
-6
-2
-1
-9
-2
-8
-6
-1
-9
-5
-2
-5
-4
-4
-2
-8
-3
-8
-2
-4
-5
-0
-3
-1
-7
-7
-5
-7
-9
-7
-1
-9
-2
-1
-4
-2
-9
-2
-0
-4
-9
-1
-4
-8
-1
-8
-4
-5
-9
-8
-8
-3
-7
-6
-0
-0
-3
-0
-2
-0
-6
-4
-9
-5
-3
-3
-2
-3
-9
-1
-2
-6
-8
-0
-5
-6
-6
-6
-3
-8
-8
-2
-7
-5
-8
-9
-6
-1
-8
-4
-1
-2
-5
-9
-1
-9
-7
-5
-4
-0
-8
-9
-9
-1
-0
-5
-2
-3
-7
-8
-9
-4
-0
-6
-3
-9
-5
-2
-1
-3
-1
-3
-6
-5
-7
-4
-2
-2
-6
-3
-2
-6
-5
-4
-8
-9
-7
-1
-3
-0
-3
-8
-3
-1
-9
-3
-4
-4
-6
-4
-2
-1
-8
-2
-5
-4
-8
-8
-4
-0
-0
-2
-3
-2
-7
-3
-0
-8
-7
-4
-4
-7
-9
-6
-9
-0
-9
-8
-0
-4
-6
-0
-6
-3
-5
-4
-8
-3
-3
-9
-3
-3
-3
-7
-8
-0
-2
-2
-1
-7
-0
-6
-5
-4
-3
-8
-0
-9
-6
-3
-8
-0
-9
-9
-6
-8
-6
-8
-5
-7
-8
-6
-0
-2
-4
-0
-2
-2
-3
-1
-9
-7
-5
-8
-0
-8
-4
-6
-2
-6
-7
-9
-3
-2
-9
-8
-2
-2
-9
-2
-7
-3
-5
-9
-1
-8
-0
-2
-0
-5
-2
-1
-3
-7
-6
-7
-1
-2
-5
-8
-0
-3
-7
-1
-4
-0
-9
-1
-8
-6
-7
-7
-4
-3
-4
-9
-1
-9
-5
-1
-7
-3
-9
-7
-6
-9
-1
-3
-3
-8
-3
-3
-6
-7
-2
-8
-5
-8
-5
-1
-1
-4
-4
-3
-1
-0
-7
-7
-0
-7
-9
-4
-4
-8
-5
-5
-4
-0
-8
-2
-7
-0
-8
-4
-8
-0
-4
-0
-6
-1
-7
-3
-2
-6
-7
-2
-6
-9
-3
-1
-4
-6
-2
-5
-4
-2
-0
-6
-2
-1
-7
-3
-4
-1
-0
-5
-4
-3
-1
-1
-7
-4
-9
-9
-4
-8
-4
-0
-2
-4
-5
-1
-1
-6
-4
-7
-1
-9
-4
-2
-4
-1
-5
-5
-3
-8
-3
-1
-4
-5
-6
-8
-9
-4
-1
-5
-3
-8
-0
-3
-2
-5
-1
-2
-8
-3
-4
-4
-0
-8
-8
-3
-3
-1
-7
-3
-5
-9
-6
-3
-2
-6
-1
-3
-6
-0
-7
-2
-1
-7
-1
-4
-2
-4
-2
-1
-7
-9
-6
-1
-1
-2
-4
-8
-1
-7
-7
-4
-8
-0
-9
-3
-1
-3
-1
-0
-7
-7
-0
-3
-5
-5
-2
-7
-6
-6
-9
-2
-8
-3
-5
-2
-2
-5
-6
-0
-8
-2
-9
-2
-8
-8
-8
-8
-7
-4
-9
-3
-0
-6
-6
-3
-2
-1
-3
-2
-2
-9
-3
-0
-0
-5
-7
-8
-3
-4
-4
-6
-0
-2
-9
-1
-4
-7
-4
-7
-3
-9
-8
-8
-4
-7
-1
-2
-1
-2
-2
-3
-2
-3
-2
-3
-9
-1
-7
-4
-0
-3
-5
-5
-8
-6
-3
-2
-6
-7
-6
-6
-3
-2
-7
-9
-1
-1
-7
-5
-6
-4
-9
-5
-1
-3
-3
-4
-7
-8
-9
-1
-1
-6
-9
-1
-4
-4
-5
-4
-0
-6
-2
-2
-3
-1
-5
-1
-2
-0
-3
-8
-1
-2
-6
-7
-1
-6
-2
-3
-9
-0
-1
-2
-2
-0
-8
-9
diff --git a/tests/CaffePreprocessor.cpp b/tests/CaffePreprocessor.cpp
deleted file mode 100644
index 54ce833b72..0000000000
--- a/tests/CaffePreprocessor.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "InferenceTestImage.hpp"
-#include "CaffePreprocessor.hpp"
-
-#include <armnn/utility/NumericCast.hpp>
-
-#include <iostream>
-#include <fcntl.h>
-#include <array>
-
-const std::vector<ImageSet> g_DefaultImageSet =
-{
- {"shark.jpg", 2}
-};
-
-CaffePreprocessor::CaffePreprocessor(const std::string& binaryFileDirectory, unsigned int width, unsigned int height,
- const std::vector<ImageSet>& imageSet)
-: m_BinaryDirectory(binaryFileDirectory)
-, m_Height(height)
-, m_Width(width)
-, m_ImageSet(imageSet.empty() ? g_DefaultImageSet : imageSet)
-{
-}
-
-std::unique_ptr<CaffePreprocessor::TTestCaseData> CaffePreprocessor::GetTestCaseData(unsigned int testCaseId)
-{
- testCaseId = testCaseId % armnn::numeric_cast<unsigned int>(m_ImageSet.size());
- const ImageSet& imageSet = m_ImageSet[testCaseId];
- const std::string fullPath = m_BinaryDirectory + imageSet.first;
-
- InferenceTestImage image(fullPath.c_str());
- image.Resize(m_Width, m_Height, CHECK_LOCATION());
-
- // The model expects image data in BGR format.
- std::vector<float> inputImageData = GetImageDataInArmNnLayoutAsFloatsSubtractingMean(ImageChannelLayout::Bgr,
- image, m_MeanBgr);
-
- // List of labels: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a
- const unsigned int label = imageSet.second;
- return std::make_unique<TTestCaseData>(label, std::move(inputImageData));
-}
diff --git a/tests/CaffePreprocessor.hpp b/tests/CaffePreprocessor.hpp
deleted file mode 100644
index a57382e618..0000000000
--- a/tests/CaffePreprocessor.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "ClassifierTestCaseData.hpp"
-
-#include <array>
-#include <string>
-#include <vector>
-#include <memory>
-
-/// Caffe requires BGR images, not normalized, mean adjusted and resized using smooth resize of STB library
-
-using ImageSet = std::pair<const std::string, unsigned int>;
-
-class CaffePreprocessor
-{
-public:
- using DataType = float;
- using TTestCaseData = ClassifierTestCaseData<DataType>;
-
- explicit CaffePreprocessor(const std::string& binaryFileDirectory,
- unsigned int width = 227,
- unsigned int height = 227,
- const std::vector<ImageSet>& imageSet = std::vector<ImageSet>());
- std::unique_ptr<TTestCaseData> GetTestCaseData(unsigned int testCaseId);
-
-private:
- unsigned int GetNumImageElements() const { return 3 * m_Width * m_Height; }
- unsigned int GetNumImageBytes() const { return 4 * GetNumImageElements(); }
-
- std::string m_BinaryDirectory;
- unsigned int m_Height;
- unsigned int m_Width;
- // Mean value of the database [B, G, R].
- const std::array<float, 3> m_MeanBgr = {{104.007965f, 116.669472f, 122.675102f}};
- const std::vector<ImageSet> m_ImageSet;
-};
diff --git a/tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp b/tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp
deleted file mode 100644
index aa814ff44b..0000000000
--- a/tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../CaffePreprocessor.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- int retVal = EXIT_FAILURE;
- try
- {
- // Coverity fix: The following code may throw an exception of type std::length_error.
- std::vector<ImageSet> imageSet =
- {
- {"ILSVRC2012_val_00000018.JPEG", 21 },
- {"shark.jpg", 2}
- };
-
- armnn::TensorShape inputTensorShape({ 1, 3, 224, 224 });
-
- using DataType = float;
- using DatabaseType = CaffePreprocessor;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
- retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "ResNet_50_ilsvrc15_model.caffemodel", true,
- "data", "prob", { 0, 1 },
- [&imageSet](const char* dataDir, const ModelType&) {
- return DatabaseType(dataDir, 224, 224, imageSet);
- }, &inputTensorShape);
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeResNet-Armnn: An error has occurred when running "
- "the classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/CaffeResNet-Armnn/Validation.txt b/tests/CaffeResNet-Armnn/Validation.txt
deleted file mode 100644
index b3c5de80b7..0000000000
--- a/tests/CaffeResNet-Armnn/Validation.txt
+++ /dev/null
@@ -1,2000 +0,0 @@
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
-21
-2
diff --git a/tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp b/tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp
deleted file mode 100644
index c19abefef8..0000000000
--- a/tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../CaffePreprocessor.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- using DataType = float;
- using DatabaseType = CaffePreprocessor;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- return armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "squeezenet.caffemodel", true,
- "input", "prob", { 0 },
- [](const char* dataDir, const ModelType &) { return CaffePreprocessor(dataDir); });
-}
diff --git a/tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp b/tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp
deleted file mode 100644
index 733cc29b27..0000000000
--- a/tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../InferenceTest.hpp"
-#include "../CaffePreprocessor.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-
-int main(int argc, char* argv[])
-{
- armnn::TensorShape inputTensorShape({ 1, 3, 224, 224 });
- int retVal = EXIT_FAILURE;
- try
- {
- using DataType = float;
- using DatabaseType = CaffePreprocessor;
- using ParserType = armnnCaffeParser::ICaffeParser;
- using ModelType = InferenceModel<ParserType, DataType>;
-
- // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
- retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
- argc, argv, "VGG_CNN_S.caffemodel", true,
- "input", "prob", { 0 },
- [](const char* dataDir, const ModelType&) {
- return DatabaseType(dataDir, 224, 224);
- }, &inputTensorShape);
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeVGG-Armnn: An error has occurred when running "
- "the classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/CaffeVGG-Armnn/Validation.txt b/tests/CaffeVGG-Armnn/Validation.txt
deleted file mode 100644
index cb95f050e2..0000000000
--- a/tests/CaffeVGG-Armnn/Validation.txt
+++ /dev/null
@@ -1,1000 +0,0 @@
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
-2
diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
deleted file mode 100644
index d563faaab2..0000000000
--- a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "../YoloInferenceTest.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#include "armnn/TypesUtils.hpp"
-
-int main(int argc, char* argv[])
-{
- armnn::TensorShape inputTensorShape{ { 1, 3, YoloImageHeight, YoloImageWidth } };
-
- using YoloInferenceModel = InferenceModel<armnnCaffeParser::ICaffeParser,
- float>;
-
- int retVal = EXIT_FAILURE;
- try
- {
- // Coverity fix: InferenceTestMain() may throw uncaught exceptions.
- retVal = InferenceTestMain(argc, argv, { 0 },
- [&inputTensorShape]()
- {
- return make_unique<YoloTestCaseProvider<YoloInferenceModel>>(
- [&]
- (const InferenceTestOptions &commonOptions,
- typename YoloInferenceModel::CommandLineOptions modelOptions)
- {
- if (!ValidateDirectory(modelOptions.m_ModelDir))
- {
- return std::unique_ptr<YoloInferenceModel>();
- }
-
- typename YoloInferenceModel::Params modelParams;
- modelParams.m_ModelPath = modelOptions.m_ModelDir + "yolov1_tiny_voc2007_model.caffemodel";
- modelParams.m_InputBindings = { "data" };
- modelParams.m_OutputBindings = { "fc12" };
- modelParams.m_InputShapes = { inputTensorShape };
- modelParams.m_IsModelBinary = true;
- modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
- modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
- modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
-
- return std::make_unique<YoloInferenceModel>(modelParams,
- commonOptions.m_EnableProfiling,
- commonOptions.m_DynamicBackendsPath);
- });
- });
- }
- catch (const std::exception& e)
- {
- // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
- // exception of type std::length_error.
- // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
- std::cerr << "WARNING: CaffeYolo-Armnn: An error has occurred when running "
- "the classifier inference tests: " << e.what() << std::endl;
- }
- return retVal;
-}
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index f812e53e04..8ab286b16b 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -13,9 +13,6 @@
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
-#if defined(ARMNN_CAFFE_PARSER)
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
@@ -472,15 +469,6 @@ int main(int argc, const char* argv[])
return EXIT_FAILURE;
#endif
}
- else if (modelFormat.find("caffe") != std::string::npos)
- {
- #if defined(ARMNN_CAFFE_PARSER)
- return MainImpl<armnnCaffeParser::ICaffeParser, float>(ProgramOptions.m_ExNetParams, runtime);
- #else
- ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
- return EXIT_FAILURE;
- #endif
- }
else if (modelFormat.find("onnx") != std::string::npos)
{
#if defined(ARMNN_ONNX_PARSER)
@@ -526,7 +514,7 @@ int main(int argc, const char* argv[])
else
{
ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
- << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
+ << "'. Please include 'tensorflow', 'tflite' or 'onnx'";
return EXIT_FAILURE;
}
}
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 890ab2a658..4e3b5e313d 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -41,14 +41,6 @@ void CheckModelFormat(const std::string& modelFormat)
"built with serialization support.");
#endif
}
- else if (modelFormat.find("caffe") != std::string::npos)
- {
-#if defined(ARMNN_CAFFE_PARSER)
-#else
- throw armnn::InvalidArgumentException("Can't run model in caffe format without a "
- "built with Caffe parser support.");
-#endif
- }
else if (modelFormat.find("onnx") != std::string::npos)
{
#if defined(ARMNN_ONNX_PARSER)
@@ -83,7 +75,7 @@ void CheckModelFormat(const std::string& modelFormat)
else
{
throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
- "Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'",
+ "Please include 'tensorflow', 'tflite' or 'onnx'",
modelFormat));
}
}
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index b52adaa325..7c1db61841 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -173,12 +173,12 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<std::vector<std::string>>())
("f,model-format",
- "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
+ "armnn-binary, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
"tensorflow-text.",
cxxopts::value<std::string>())
("m,model-path",
- "Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
+ "Path to model file, e.g. .armnn, , .prototxt, .tflite, .onnx",
cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
("i,input-name",
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index 754d980423..34dbe1e352 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -164,7 +164,7 @@ public:
("f,model-format",
"Format of the intended model file that uses the images."
"Different formats have different image normalization styles."
- "Accepted values (caffe, tensorflow, tflite)",
+ "Accepted values (tensorflow, tflite)",
cxxopts::value<std::string>(m_ModelFormat))
("o,outfile",
"Output raw tensor file path",
@@ -235,11 +235,7 @@ public:
unsigned int GetNewHeight() {return static_cast<unsigned int>(std::stoi(m_NewHeight));}
SupportedFrontend GetModelFormat()
{
- if (m_ModelFormat == "caffe")
- {
- return SupportedFrontend::Caffe;
- }
- else if (m_ModelFormat == "tensorflow")
+ if (m_ModelFormat == "tensorflow")
{
return SupportedFrontend::TensorFlow;
}
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index 4793f822fb..f2ee470a7a 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -24,14 +24,13 @@ struct NormalizationParameters
enum class SupportedFrontend
{
- Caffe = 0,
- TensorFlow = 1,
- TFLite = 2,
+ TensorFlow = 0,
+ TFLite = 1,
};
/** Get normalization parameters.
* Note that different flavours of models and different model data types have different normalization methods.
- * This tool currently only supports Caffe, TF and TFLite models
+ * This tool currently only supports TF and TFLite models
*
* @param[in] modelFormat One of the supported frontends
* @param[in] outputType Output type of the image tensor, also the type of the intended model
@@ -46,8 +45,6 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode
normParams.stddev = { 1.0, 1.0, 1.0 };
switch (modelFormat)
{
- case SupportedFrontend::Caffe:
- break;
case SupportedFrontend::TensorFlow:
case SupportedFrontend::TFLite:
default:
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index dca3ab2788..cab594ed48 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -367,7 +367,7 @@ public:
options
.allow_unrecognised_options()
.add_options()
- ("m,model-dir", "Path to directory containing model files (.caffemodel/.prototxt/.tflite)",
+ ("m,model-dir", "Path to directory containing model files (.prototxt/.tflite)",
cxxopts::value<std::string>(cLineOptions.m_ModelDir))
("c,compute", backendsMessage.c_str(),
cxxopts::value<std::vector<std::string>>(cLineOptions.m_ComputeDevices)->default_value("CpuRef"))
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index f8337a5286..345a0fed98 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -76,7 +76,7 @@ int main(int argc, char* argv[])
"Path to armnn format model file",
cxxopts::value<std::string>(modelPath))
("f,model-format",
- "The model format. Supported values: caffe, tensorflow, tflite",
+ "The model format. Supported values: tensorflow, tflite",
cxxopts::value<std::string>(modelFormat))
("i,input-name",
"Identifier of the input tensors in the network separated by comma with no space.",
@@ -312,11 +312,7 @@ int main(int argc, char* argv[])
const unsigned int batchSize = 1;
// Get normalisation parameters
SupportedFrontend modelFrontend;
- if (modelFormat == "caffe")
- {
- modelFrontend = SupportedFrontend::Caffe;
- }
- else if (modelFormat == "tensorflow")
+ if (modelFormat == "tensorflow")
{
modelFrontend = SupportedFrontend::TensorFlow;
}
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index 456ff68e7c..9c51d3f0a7 100644
--- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
+++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
@@ -6,7 +6,7 @@
#include "armnn/ArmNN.hpp"
#include "armnn/Utils.hpp"
#include "armnn/INetwork.hpp"
-#include "armnnCaffeParser/ICaffeParser.hpp"
+#include "armnnTfParser/TfParser.hpp"
#include "../Cifar10Database.hpp"
#include "../InferenceTest.hpp"
#include "../InferenceModel.hpp"
@@ -89,7 +89,7 @@ int main(int argc, char* argv[])
return EXIT_FAILURE;
}
- fs::path modelPath = fs::path(modelDir + "/cifar10_full_iter_60000.caffemodel");
+ fs::path modelPath = fs::path(modelDir + "/cifar10_tf.prototxt");
// Create runtime
// This will also load dynamic backend in case that the dynamic backend path is specified
@@ -123,7 +123,7 @@ int main(int argc, char* argv[])
};
std::vector<Net> networks;
- armnnCaffeParser::ICaffeParserPtr parser(armnnCaffeParser::ICaffeParser::Create());
+ armnnTfParser::ITfParserPtr parser(armnnTfParser::ITfParser::Create());
const int networksCount = 4;
for (int i = 0; i < networksCount; ++i)
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index d902d23d86..74c878304d 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -12,9 +12,6 @@
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
-#if defined(ARMNN_CAFFE_PARSER)
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
diff --git a/tests/TfResNext_Quantized-Armnn/TfResNext_Quantized-Armnn.cpp b/tests/TfResNext_Quantized-Armnn/TfResNext_Quantized-Armnn.cpp
index c152c0f6c2..bec2771d4b 100644
--- a/tests/TfResNext_Quantized-Armnn/TfResNext_Quantized-Armnn.cpp
+++ b/tests/TfResNext_Quantized-Armnn/TfResNext_Quantized-Armnn.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
#include "../InferenceTest.hpp"
-#include "../CaffePreprocessor.hpp"
+#include "../ImagePreprocessor.hpp"
#include "armnnTfParser/ITfParser.hpp"
int main(int argc, char* argv[])
@@ -21,7 +21,7 @@ int main(int argc, char* argv[])
armnn::TensorShape inputTensorShape({ 1, 3, 224, 224 });
using DataType = float;
- using DatabaseType = CaffePreprocessor;
+    using DatabaseType = ImagePreprocessor<DataType>;
using ParserType = armnnTfParser::ITfParser;
using ModelType = InferenceModel<ParserType, DataType>;