author     Georgios Pinitas <georgios.pinitas@arm.com>    2018-07-03 12:06:23 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:54:10 +0000
commit     12be7ab4876f77fecfab903df70791623219b3da
tree       1cfa6852e60948bee9db0831a9f3abc97a2031c8
parent     e39334c15c7fd141bb8173d5017ea5ca157fca2c
download   ComputeLibrary-12be7ab4876f77fecfab903df70791623219b3da.tar.gz
COMPMID-1310: Create graph validation executables.
Change-Id: I9e0b57b1b83fe5a95777cdaeddba6ecef650bafc
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138697
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
-rw-r--r--  arm_compute/core/utils/misc/Utility.h | 15
-rw-r--r--  arm_compute/graph/Graph.h | 13
-rw-r--r--  arm_compute/graph/TypeLoader.h | 105
-rw-r--r--  arm_compute/graph/TypePrinter.h | 239
-rw-r--r--  arm_compute/graph/algorithms/BFS.h | 2
-rw-r--r--  arm_compute/graph/detail/ExecutionHelpers.h | 8
-rw-r--r--  docs/00_introduction.dox | 12
-rw-r--r--  examples/SConscript | 1
-rw-r--r--  examples/cl_convolution.cpp | 4
-rw-r--r--  examples/cl_events.cpp | 4
-rw-r--r--  examples/cl_sgemm.cpp | 4
-rw-r--r--  examples/gc_absdiff.cpp | 4
-rw-r--r--  examples/gc_dc.cpp | 6
-rw-r--r--  examples/graph_alexnet.cpp | 109
-rw-r--r--  examples/graph_googlenet.cpp | 106
-rw-r--r--  examples/graph_inception_v3.cpp | 104
-rw-r--r--  examples/graph_inception_v4.cpp | 114
-rw-r--r--  examples/graph_lenet.cpp | 86
-rw-r--r--  examples/graph_mobilenet.cpp | 336
-rw-r--r--  examples/graph_mobilenet_qasymm8.cpp | 239
-rw-r--r--  examples/graph_resnet50.cpp | 106
-rw-r--r--  examples/graph_resnext50.cpp | 97
-rw-r--r--  examples/graph_squeezenet.cpp | 107
-rw-r--r--  examples/graph_squeezenet_v1_1.cpp | 106
-rw-r--r--  examples/graph_vgg16.cpp | 105
-rw-r--r--  examples/graph_vgg19.cpp | 106
-rw-r--r--  examples/neon_cartoon_effect.cpp | 4
-rw-r--r--  examples/neon_cnn.cpp | 6
-rw-r--r--  examples/neon_convolution.cpp | 4
-rw-r--r--  examples/neon_copy_objects.cpp | 6
-rw-r--r--  examples/neon_scale.cpp | 4
-rw-r--r--  examples/neoncl_scale_median_gaussian.cpp | 4
-rw-r--r--  src/graph/Graph.cpp | 10
-rw-r--r--  src/graph/GraphManager.cpp | 33
-rw-r--r--  src/graph/Tensor.cpp | 4
-rw-r--r--  src/graph/TypeLoader.cpp | 89
-rw-r--r--  src/graph/detail/ExecutionHelpers.cpp | 25
-rw-r--r--  tests/SConscript | 6
-rw-r--r--  tests/benchmark_examples/RunExample.cpp | 8
-rw-r--r--  tests/framework/command_line/CommandLineParser.h | 117
-rw-r--r--  tests/framework/command_line/CommonOptions.cpp | 3
-rw-r--r--  tests/framework/command_line/CommonOptions.h | 34
-rw-r--r--  tests/framework/command_line/Option.cpp | 68
-rw-r--r--  tests/framework/command_line/ToggleOption.cpp | 64
-rw-r--r--  tests/main.cpp | 24
-rw-r--r--  tests/validate_examples/RunExample.cpp | 12
-rw-r--r--  utils/CommonGraphOptions.cpp | 190
-rw-r--r--  utils/CommonGraphOptions.h | 111
-rw-r--r--  utils/GraphUtils.cpp | 108
-rw-r--r--  utils/GraphUtils.h | 134
-rw-r--r--  utils/ImageLoader.h | 26
-rw-r--r--  utils/Utils.cpp | 41
-rw-r--r--  utils/Utils.h | 25
-rw-r--r--  utils/command_line/CommandLineOptions.h (renamed from tests/framework/command_line/CommandLineOptions.h) | 8
-rw-r--r--  utils/command_line/CommandLineParser.h (renamed from tests/framework/command_line/CommandLineParser.cpp) | 101
-rw-r--r--  utils/command_line/EnumListOption.h (renamed from tests/framework/command_line/EnumListOption.h) | 13
-rw-r--r--  utils/command_line/EnumOption.h (renamed from tests/framework/command_line/EnumOption.h) | 13
-rw-r--r--  utils/command_line/ListOption.h (renamed from tests/framework/command_line/ListOption.h) | 13
-rw-r--r--  utils/command_line/Option.h (renamed from tests/framework/command_line/Option.h) | 50
-rw-r--r--  utils/command_line/SimpleOption.h (renamed from tests/framework/command_line/SimpleOption.h) | 13
-rw-r--r--  utils/command_line/ToggleOption.h (renamed from tests/framework/command_line/ToggleOption.h) | 44
61 files changed, 1770 insertions, 1783 deletions
diff --git a/arm_compute/core/utils/misc/Utility.h b/arm_compute/core/utils/misc/Utility.h
index a2784a2fc0..0a9f180b4c 100644
--- a/arm_compute/core/utils/misc/Utility.h
+++ b/arm_compute/core/utils/misc/Utility.h
@@ -192,6 +192,21 @@ inline bool check_aligned(void *ptr, const size_t alignment)
{
return (reinterpret_cast<std::uintptr_t>(ptr) % alignment) == 0;
}
+
+/** Convert string to lower case.
+ *
+ * @param[in] string String to convert.
+ *
+ * @return Lower case string.
+ */
+inline std::string tolower(std::string string)
+{
+ std::transform(string.begin(), string.end(), string.begin(), [](unsigned char c)
+ {
+ return std::tolower(c);
+ });
+ return string;
+}
} // namespace utility
} // namespace arm_compute
#endif /* __ARM_COMPUTE_MISC_UTILITY_H__ */
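A minimal usage sketch of the new utility::tolower() helper added above; the call site below is illustrative and not part of the patch:

    #include "arm_compute/core/utils/misc/Utility.h"

    #include <iostream>
    #include <string>

    int main()
    {
        // Normalise a user-supplied option value before comparing it
        const std::string value = arm_compute::utility::tolower("NHWC");
        std::cout << value << std::endl; // prints "nhwc"
        return 0;
    }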
diff --git a/arm_compute/graph/Graph.h b/arm_compute/graph/Graph.h
index 16f5f97986..2a776826e5 100644
--- a/arm_compute/graph/Graph.h
+++ b/arm_compute/graph/Graph.h
@@ -72,7 +72,7 @@ public:
* @tparam NT Node operation
* @tparam Ts Arguments to operation
*
- * @param args Node arguments
+ * @param[in] args Node arguments
*
* @return ID of the node
*/
@@ -114,9 +114,11 @@ public:
GraphID id() const;
/** Returns graph input nodes
*
- * @return vector containing the graph inputs
+ * @param[in] type Type of nodes to return
+ *
+ * @return vector containing the graph nodes of the given type
*/
- const std::vector<NodeID> &inputs();
+ const std::vector<NodeID> &nodes(NodeType type);
/** Returns nodes of graph
*
* @warning Nodes can be nullptr if they have been removed during the mutation steps of the graph
@@ -238,10 +240,7 @@ inline NodeID Graph::add_node(Ts &&... args)
node->set_id(nid);
// Keep track of input nodes
- if(node->type() == NodeType::Input)
- {
- _tagged_nodes[NodeType::Input].push_back(nid);
- }
+ _tagged_nodes[node->type()].push_back(nid);
// Associate a new tensor with each output
for(auto &output : node->_outputs)
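The inputs() accessor is generalised to nodes(NodeType), and add_node() now tags every node type rather than only inputs. A hedged sketch of how a caller could query the tagged nodes (it assumes the NodeType::Output tag that the graph front end also defines; the function name is made up):

    #include "arm_compute/graph/Graph.h"

    #include <iostream>

    void print_io_counts(arm_compute::graph::Graph &g)
    {
        using namespace arm_compute::graph;

        // Any tagged node type can be queried now, not just NodeType::Input
        const std::vector<NodeID> &inputs  = g.nodes(NodeType::Input);
        const std::vector<NodeID> &outputs = g.nodes(NodeType::Output);
        std::cout << "Inputs: " << inputs.size() << ", Outputs: " << outputs.size() << std::endl;
    }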
diff --git a/arm_compute/graph/TypeLoader.h b/arm_compute/graph/TypeLoader.h
new file mode 100644
index 0000000000..77f096133d
--- /dev/null
+++ b/arm_compute/graph/TypeLoader.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_TYPE_LOADER_H__
+#define __ARM_COMPUTE_GRAPH_TYPE_LOADER_H__
+
+#include "arm_compute/graph/Types.h"
+
+#include <istream>
+
+namespace arm_compute
+{
+/** Converts a string to a strongly typed enumeration @ref DataType
+ *
+ * @param[in] name String to convert
+ *
+ * @return Converted DataType enumeration
+ */
+arm_compute::DataType data_type_from_name(const std::string &name);
+
+/** Input Stream operator for @ref DataType
+ *
+ * @param[in] stream Stream to parse
+ * @param[out] data_type Output data type
+ *
+ * @return Updated stream
+ */
+inline ::std::istream &operator>>(::std::istream &stream, arm_compute::DataType &data_type)
+{
+ std::string value;
+ stream >> value;
+ data_type = data_type_from_name(value);
+ return stream;
+}
+
+/** Converts a string to a strongly typed enumeration @ref DataLayout
+ *
+ * @param[in] name String to convert
+ *
+ * @return Converted DataLayout enumeration
+ */
+arm_compute::DataLayout data_layout_from_name(const std::string &name);
+
+/** Input Stream operator for @ref DataLayout
+ *
+ * @param[in] stream Stream to parse
+ * @param[out] data_layout Output data layout
+ *
+ * @return Updated stream
+ */
+inline ::std::istream &operator>>(::std::istream &stream, arm_compute::DataLayout &data_layout)
+{
+ std::string value;
+ stream >> value;
+ data_layout = data_layout_from_name(value);
+ return stream;
+}
+
+namespace graph
+{
+/** Converts a string to a strongly typed enumeration @ref Target
+ *
+ * @param[in] name String to convert
+ *
+ * @return Converted Target enumeration
+ */
+Target target_from_name(const std::string &name);
+
+/** Input Stream operator for @ref Target
+ *
+ * @param[in] stream Stream to parse
+ * @param[out] target Output target
+ *
+ * @return Updated stream
+ */
+inline ::std::istream &operator>>(::std::istream &stream, Target &target)
+{
+ std::string value;
+ stream >> value;
+ target = target_from_name(value);
+ return stream;
+}
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_TYPE_LOADER_H__ */
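These extraction operators exist so that command-line option values can be streamed straight into the strong types. A small sketch of the intended use (the exact spellings accepted, e.g. "f16" or "neon", depend on the *_from_name() implementations in src/graph/TypeLoader.cpp):

    #include "arm_compute/graph/TypeLoader.h"

    #include <sstream>

    int main()
    {
        arm_compute::DataType      data_type{};
        arm_compute::graph::Target target{};

        std::istringstream type_stream("f16");
        type_stream >> data_type; // yields DataType::F16 if the name is recognised

        std::istringstream target_stream("neon");
        target_stream >> target; // yields Target::NEON if the name is recognised

        return 0;
    }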
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 177a5e2f38..c3601f2373 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -28,89 +28,12 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/graph/Types.h"
+#include "utils/TypePrinter.h"
+
namespace arm_compute
{
namespace graph
{
-/** Formatted output of the Dimensions type. */
-template <typename T>
-inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::Dimensions<T> &dimensions)
-{
- if(dimensions.num_dimensions() > 0)
- {
- os << dimensions[0];
-
- for(unsigned int d = 1; d < dimensions.num_dimensions(); ++d)
- {
- os << "x" << dimensions[d];
- }
- }
-
- return os;
-}
-
-/** Formatted output of the Size2D type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const Size2D &size)
-{
- os << size.width << "x" << size.height;
-
- return os;
-}
-
-/** Formatted output of the DataType type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const DataType &data_type)
-{
- switch(data_type)
- {
- case DataType::UNKNOWN:
- os << "UNKNOWN";
- break;
- case DataType::U8:
- os << "U8";
- break;
- case DataType::QASYMM8:
- os << "QASYMM8";
- break;
- case DataType::S8:
- os << "S8";
- break;
- case DataType::U16:
- os << "U16";
- break;
- case DataType::S16:
- os << "S16";
- break;
- case DataType::U32:
- os << "U32";
- break;
- case DataType::S32:
- os << "S32";
- break;
- case DataType::U64:
- os << "U64";
- break;
- case DataType::S64:
- os << "S64";
- break;
- case DataType::F16:
- os << "F16";
- break;
- case DataType::F32:
- os << "F32";
- break;
- case DataType::F64:
- os << "F64";
- break;
- case DataType::SIZET:
- os << "SIZET";
- break;
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-
- return os;
-}
-
/** Formatted output of the Target. */
inline ::std::ostream &operator<<(::std::ostream &os, const Target &target)
{
@@ -135,24 +58,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Target &target)
return os;
}
-/** Formatted output of the DataLayout */
-inline ::std::ostream &operator<<(::std::ostream &os, const DataLayout &data_layout)
-{
- switch(data_layout)
- {
- case DataLayout::NCHW:
- os << "NCHW";
- break;
- case DataLayout::NHWC:
- os << "NHWC";
- break;
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-
- return os;
-}
-
inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
{
switch(node_type)
@@ -224,100 +129,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
return os;
}
-/** Formatted output of the activation function type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo::ActivationFunction &act_function)
-{
- switch(act_function)
- {
- case ActivationLayerInfo::ActivationFunction::ABS:
- os << "ABS";
- break;
- case ActivationLayerInfo::ActivationFunction::LINEAR:
- os << "LINEAR";
- break;
- case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- os << "LOGISTIC";
- break;
- case ActivationLayerInfo::ActivationFunction::RELU:
- os << "RELU";
- break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- os << "BOUNDED_RELU";
- break;
- case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- os << "LEAKY_RELU";
- break;
- case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
- os << "SOFT_RELU";
- break;
- case ActivationLayerInfo::ActivationFunction::SQRT:
- os << "SQRT";
- break;
- case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
- os << "LU_BOUNDED_RELU";
- break;
- case ActivationLayerInfo::ActivationFunction::SQUARE:
- os << "SQUARE";
- break;
- case ActivationLayerInfo::ActivationFunction::TANH:
- os << "TANH";
- break;
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-
- return os;
-}
-
-inline std::string to_string(const ActivationLayerInfo::ActivationFunction &act_function)
-{
- std::stringstream str;
- str << act_function;
- return str.str();
-}
-
-/** Formatted output of the PoolingType type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const PoolingType &pool_type)
-{
- switch(pool_type)
- {
- case PoolingType::AVG:
- os << "AVG";
- break;
- case PoolingType::MAX:
- os << "MAX";
- break;
- case PoolingType::L2:
- os << "L2";
- break;
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-
- return os;
-}
-
-/** Formatted output of the NormType type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const NormType &norm_type)
-{
- switch(norm_type)
- {
- case NormType::CROSS_MAP:
- os << "CROSS_MAP";
- break;
- case NormType::IN_MAP_1D:
- os << "IN_MAP_1D";
- break;
- case NormType::IN_MAP_2D:
- os << "IN_MAP_2D";
- break;
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-
- return os;
-}
-
/** Formatted output of the EltwiseOperation type. */
inline ::std::ostream &operator<<(::std::ostream &os, const EltwiseOperation &eltwise_op)
{
@@ -401,52 +212,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DepthwiseConvolution
return os;
}
-
-/** Formatted output of the PadStrideInfo type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const PadStrideInfo &pad_stride_info)
-{
- os << pad_stride_info.stride().first << "," << pad_stride_info.stride().second;
- os << ";";
- os << pad_stride_info.pad_left() << "," << pad_stride_info.pad_right() << ","
- << pad_stride_info.pad_top() << "," << pad_stride_info.pad_bottom();
-
- return os;
-}
-
-/** Formatted output of the QuantizationInfo type. */
-inline ::std::ostream &operator<<(::std::ostream &os, const QuantizationInfo &quantization_info)
-{
- os << "Scale:" << quantization_info.scale << "~"
- << "Offset:" << quantization_info.offset;
- return os;
-}
-
-/** Formatted output of the Interpolation policy type.
- *
- * @param[out] os Output stream.
- * @param[in] policy Interpolation policy to output.
- *
- * @return Modified output stream.
- */
-inline ::std::ostream &operator<<(::std::ostream &os, const InterpolationPolicy &policy)
-{
- switch(policy)
- {
- case InterpolationPolicy::NEAREST_NEIGHBOR:
- os << "NEAREST NEIGHBOR";
- break;
- case InterpolationPolicy::BILINEAR:
- os << "BILINEAR";
- break;
- case InterpolationPolicy::AREA:
- os << "AREA";
- break;
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-
- return os;
-}
} // namespace graph
} // namespace arm_compute
#endif /* __ARM_COMPUTE_GRAPH_TYPE_PRINTER_H__ */
diff --git a/arm_compute/graph/algorithms/BFS.h b/arm_compute/graph/algorithms/BFS.h
index 36ca872f15..97292d733b 100644
--- a/arm_compute/graph/algorithms/BFS.h
+++ b/arm_compute/graph/algorithms/BFS.h
@@ -85,7 +85,7 @@ inline std::vector<NodeID> bfs(Graph &g)
std::list<NodeID> queue;
// Push inputs and mark as visited
- for(auto &input : g.inputs())
+ for(auto &input : g.nodes(NodeType::Input))
{
if(input != EmptyNodeID)
{
diff --git a/arm_compute/graph/detail/ExecutionHelpers.h b/arm_compute/graph/detail/ExecutionHelpers.h
index 23dd207695..3a357776e4 100644
--- a/arm_compute/graph/detail/ExecutionHelpers.h
+++ b/arm_compute/graph/detail/ExecutionHelpers.h
@@ -95,13 +95,17 @@ void call_all_const_node_accessors(Graph &g);
/** Call all input node accessors
*
* @param[in] workload Workload to execute
+ *
+ * @return True if all the accesses were valid
*/
-void call_all_input_node_accessors(ExecutionWorkload &workload);
+bool call_all_input_node_accessors(ExecutionWorkload &workload);
/** Call all output node accessors
*
* @param[in] workload Workload to execute
+ *
+ * @return True if all the accessors expect more data
*/
-void call_all_output_node_accessors(ExecutionWorkload &workload);
+bool call_all_output_node_accessors(ExecutionWorkload &workload);
/** Prepares all tasks for execution
*
* @param[in] workload Workload to prepare
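The new boolean returns allow the runtime to stop once an input accessor runs out of data or the output accessors no longer expect any. A rough sketch of the kind of loop this enables, assuming the existing call_all_tasks() helper from the same header (the real logic lives in src/graph/GraphManager.cpp, also updated by this patch):

    #include "arm_compute/graph/detail/ExecutionHelpers.h"

    void run_until_exhausted(arm_compute::graph::ExecutionWorkload &workload)
    {
        using namespace arm_compute::graph::detail;

        bool keep_running = true;
        while(keep_running)
        {
            // Stop if an input accessor reports that no more data is available
            if(!call_all_input_node_accessors(workload))
            {
                break;
            }
            call_all_tasks(workload);
            // Continue only while the output accessors expect more data
            keep_running = call_all_output_node_accessors(workload);
        }
    }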
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index c6c0ab2ac5..3b93d85524 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -778,11 +778,11 @@ To cross compile the examples with the Graph API, such as graph_lenet.cpp, you n
i.e. to cross compile the "graph_lenet" example for Linux 32bit:
- arm-linux-gnueabihf-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ arm-linux-gnueabihf-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
i.e. to cross compile the "graph_lenet" example for Linux 64bit:
- aarch64-linux-gnu-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp -I. -Iinclude -std=c++11 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ aarch64-linux-gnu-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
@@ -811,11 +811,11 @@ To compile natively the examples with the Graph API, such as graph_lenet.cpp, yo
i.e. to natively compile the "graph_lenet" example for Linux 32bit:
- g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
i.e. to natively compile the "graph_lenet" example for Linux 64bit:
- g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp -I. -Iinclude -std=c++11 L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
@@ -918,9 +918,9 @@ To cross compile the examples with the Graph API, such as graph_lenet.cpp, you n
(notice the compute library has to be built with both neon and opencl enabled - neon=1 and opencl=1)
#32 bit:
- arm-linux-androideabi-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp -I. -Iinclude -std=c++11 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
+ arm-linux-androideabi-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
#64 bit:
- aarch64-linux-android-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp -I. -Iinclude -std=c++11 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
+ aarch64-linux-android-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
@note Due to some issues in older versions of the Mali OpenCL DDK (<= r13p0), we recommend to link arm_compute statically on Android.
@note When linked statically the arm_compute_graph library currently needs the --whole-archive linker flag in order to work properly
diff --git a/examples/SConscript b/examples/SConscript
index c3576fb1a0..bada734659 100644
--- a/examples/SConscript
+++ b/examples/SConscript
@@ -47,6 +47,7 @@ else:
# Build graph examples
graph_utils = examples_env.Object("../utils/GraphUtils.cpp")
+graph_utils += examples_env.Object("../utils/CommonGraphOptions.cpp")
examples_libs = examples_env.get("LIBS",[])
for file in Glob("./graph_*.cpp"):
example = os.path.basename(os.path.splitext(str(file))[0])
diff --git a/examples/cl_convolution.cpp b/examples/cl_convolution.cpp
index 8f3d7e3a30..b15bbb6cb4 100644
--- a/examples/cl_convolution.cpp
+++ b/examples/cl_convolution.cpp
@@ -57,7 +57,7 @@ const int16_t gaussian5x5[] =
class CLConvolutionExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
PPMLoader ppm;
@@ -95,6 +95,8 @@ public:
ppm.fill_image(src);
output_filename = std::string(argv[1]) + "_out.ppm";
}
+
+ return true;
}
void do_run() override
{
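Every example's do_setup() now returns a bool so that setup can bail out cleanly (for instance when only the help menu is requested). A minimal sketch of the updated contract, with a made-up class name; the comments describe the intent visible in the examples below rather than a guaranteed framework behaviour:

    #include "utils/Utils.h"

    class MinimalExample : public arm_compute::utils::Example
    {
    public:
        bool do_setup(int argc, char **argv) override
        {
            (void)argc; // unused in this sketch
            (void)argv;
            // Return false instead to signal that setup did not complete
            // (e.g. when --help is requested), as the graph examples below do
            return true;
        }
        void do_run() override
        {
            // Nothing to run in this sketch
        }
    };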
diff --git a/examples/cl_events.cpp b/examples/cl_events.cpp
index 4a46df961c..a9c508ac58 100644
--- a/examples/cl_events.cpp
+++ b/examples/cl_events.cpp
@@ -37,7 +37,7 @@ using namespace utils;
class CLEventsExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
/** [OpenCL events] **/
PPMLoader ppm;
@@ -84,6 +84,8 @@ public:
output_filename = std::string(argv[1]) + "_out.ppm";
}
/** [OpenCL events] **/
+
+ return true;
}
void do_run() override
{
diff --git a/examples/cl_sgemm.cpp b/examples/cl_sgemm.cpp
index fa57885450..805aec1cf3 100644
--- a/examples/cl_sgemm.cpp
+++ b/examples/cl_sgemm.cpp
@@ -39,7 +39,7 @@ using namespace utils;
class CLSGEMMExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
NPYLoader npy0, npy1, npy2;
alpha = 1.0f;
@@ -164,6 +164,8 @@ public:
// Dummy run for CLTuner
sgemm.run();
+
+ return true;
}
void do_run() override
{
diff --git a/examples/gc_absdiff.cpp b/examples/gc_absdiff.cpp
index 1024dace26..f53459231f 100644
--- a/examples/gc_absdiff.cpp
+++ b/examples/gc_absdiff.cpp
@@ -38,7 +38,7 @@ using namespace utils;
class GCAbsDiffExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
PPMLoader ppm1, ppm2;
@@ -90,6 +90,8 @@ public:
{
ppm2.fill_image(src2);
}
+
+ return true;
}
void do_run() override
{
diff --git a/examples/gc_dc.cpp b/examples/gc_dc.cpp
index 8b6f4414e0..f3f194252e 100644
--- a/examples/gc_dc.cpp
+++ b/examples/gc_dc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ using namespace utils;
class GCDCExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
ARM_COMPUTE_UNUSED(argc);
ARM_COMPUTE_UNUSED(argv);
@@ -86,6 +86,8 @@ public:
*reinterpret_cast<half_float::half *>(it.ptr()) = half_float::half(1.f);
});
src.unmap();
+
+ return true;
}
void do_run() override
{
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 5328662b6d..95d36342f9 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -23,13 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <iostream>
-#include <memory>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -37,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement AlexNet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphAlexnetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphAlexnetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "AlexNet")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
+ // Return when help menu is requested
+ if(common_params.help)
{
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
11U, 11U, 96U,
@@ -169,12 +142,15 @@ public:
.set_name("fc8")
// Softmax
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -183,13 +159,18 @@ public:
}
private:
- Stream graph{ 0, "AlexNet" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for AlexNet
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
+ *
+ * @return Return code
*/
int main(int argc, char **argv)
{
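The parser/options/params trio introduced here recurs in every graph example below. A condensed sketch of the shared pattern, with the layer chain elided and an illustrative class name (main() forwards to utils::run_example as the examples do):

    #include "arm_compute/graph.h"
    #include "support/ToolchainSupport.h"
    #include "utils/CommonGraphOptions.h"
    #include "utils/GraphUtils.h"
    #include "utils/Utils.h"

    #include <iostream>

    using namespace arm_compute::utils;
    using namespace arm_compute::graph::frontend;
    using namespace arm_compute::graph_utils;

    class GraphSketchExample : public Example
    {
    public:
        GraphSketchExample()
            : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "Sketch")
        {
        }
        bool do_setup(int argc, char **argv) override
        {
            // Parse and consume the common graph options
            cmd_parser.parse(argc, argv);
            common_params = consume_common_graph_parameters(common_opts);
            if(common_params.help)
            {
                cmd_parser.print_help(argv[0]);
                return false;
            }
            std::cout << common_params << std::endl;

            graph << common_params.target
                  << common_params.fast_math_hint
                  << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
                                get_input_accessor(common_params))
                  // ... the real examples insert their layer chain here ...
                  << OutputLayer(get_output_accessor(common_params, 5));

            GraphConfig config;
            config.num_threads = common_params.threads;
            config.use_tuner   = common_params.enable_tuner;
            graph.finalize(common_params.target, config);
            return true;
        }
        void do_run() override
        {
            graph.run();
        }

    private:
        CommandLineParser  cmd_parser;
        CommonGraphOptions common_opts;
        CommonGraphParams  common_params;
        Stream             graph;
    };

    int main(int argc, char **argv)
    {
        return run_example<GraphSketchExample>(argc, argv);
    }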
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index 2dba67f5eb..e23107f081 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement Googlenet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphGooglenetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphGooglenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "GoogleNet")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
<< ConvolutionLayer(
7U, 7U, 64U,
get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv1/conv1_7x7_s2_w.npy"),
@@ -140,12 +114,15 @@ public:
get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_w.npy"),
get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_b.npy"))
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -154,7 +131,10 @@ public:
}
private:
- Stream graph{ 0, "GoogleNet" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
BranchLayer get_inception_node(const std::string &data_path, std::string &&param_path,
unsigned int a_filt,
@@ -215,7 +195,7 @@ private:
/** Main program for Googlenet
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index d1d6ab4e05..30b1b7d7ce 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,70 +34,46 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement InceptionV3's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class InceptionV3Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ InceptionV3Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "InceptionV3")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor), false))
<< ConvolutionLayer(3U, 3U, 32U,
get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -218,12 +192,15 @@ public:
.set_name("Logits/Conv2d_1c_1x1/convolution")
<< ReshapeLayer(TensorShape(1001U)).set_name("Predictions/Reshape")
<< SoftmaxLayer().set_name("Predictions/Softmax")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
@@ -232,7 +209,10 @@ public:
}
private:
- Stream graph{ 0, "InceptionV3" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
private:
BranchLayer get_inception_node_A(const std::string &data_path, std::string &&param_path,
@@ -863,7 +843,7 @@ private:
/** Main program for Inception V3
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index ed95baa99e..e7c1bc69e2 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,73 +34,46 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement InceptionV4's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class InceptionV4Example final : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ InceptionV4Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "InceptionV4")
{
- // Disabled the test for now because the process gets killed on Linux Firefly 32 bit even when using ConvolutionMethodHint::DIRECT.
- // Needs to review/rework to run the code below.
-#if __aarch64__
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor), false))
// Conv2d_1a_3x3
<< ConvolutionLayer(3U, 3U, 32U,
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Conv2d_1a_3x3_weights.npy"),
@@ -165,28 +136,27 @@ public:
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_weights.npy"),
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_biases.npy"))
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
-#else /* __aarch64__ */
- using namespace arm_compute;
- ARM_COMPUTE_UNUSED(argc);
- ARM_COMPUTE_UNUSED(argv);
-#endif /* __aarch64__ */
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
-#if __aarch64__
graph.run();
-#endif /* __aarch64__ */
}
private:
- Stream graph{ 0, "InceptionV4" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
private:
BranchLayer get_mixed_3a(const std::string &data_path)
@@ -747,7 +717,7 @@ private:
/** Main program for Inception V4
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 32c75827d3..f90892aeee 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -22,13 +22,11 @@
* SOFTWARE.
*/
#include "arm_compute/graph.h"
-
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -41,55 +39,39 @@ using namespace arm_compute::graph_utils;
class GraphLenetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphLenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "LeNet")
{
- std::string data_path; /** Path to the trainable data */
- unsigned int batches = 4; /** Number of batches */
-
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [batches] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [batches] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- //Do something with argv[1]
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " [path_to_data] [batches] [fast_math_hint]\n\n";
- std::cout << "No number of batches where specified, thus will use the default : " << batches << "\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- batches = std::strtol(argv[3], nullptr, 0);
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- //Do something with argv[1] and argv[2]
- data_path = argv[2];
- batches = std::strtol(argv[3], nullptr, 0);
- fast_math_hint = (std::strtol(argv[4], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+ unsigned int batches = 4; /** Number of batches */
+
//conv1 << pool1 << conv2 << pool2 << fc1 << act1 << fc2 << smx
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), DataType::F32), get_input_accessor(""))
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), common_params.data_type), get_input_accessor(common_params))
<< ConvolutionLayer(
5U, 5U, 20U,
get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_w.npy"),
@@ -116,12 +98,15 @@ public:
get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_b.npy"))
.set_name("ip2")
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(""));
+ << OutputLayer(get_output_accessor(common_params));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -130,7 +115,10 @@ public:
}
private:
- Stream graph{ 0, "LeNet" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for LeNet
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 40243bb111..9304b2b380 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -23,11 +23,11 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
+using namespace arm_compute;
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,107 +35,112 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement MobileNet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] data layout, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
-class GraphMobilenetExample : public Example
+class GraphMobilenetExample : public Example // NOLINT
{
public:
- void do_setup(int argc, char **argv) override
+ GraphMobilenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "MobileNetV1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ // Add model id option
+ model_id_opt = cmd_parser.add_option<SimpleOption<int>>("model-id", 0);
+ model_id_opt->set_help("Mobilenet model id (0: 1.0_224, else: 0.75_160)");
+ }
+ /** Prevent instances of this class from being copy constructed */
+ GraphMobilenetExample(const GraphMobilenetExample &) = delete;
+ /** Prevent instances of this class from being copied */
+ GraphMobilenetExample &operator=(const GraphMobilenetExample &) = delete;
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::OPTIMIZED_3x3;
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
-
- // Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
- int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
- ARM_COMPUTE_ERROR_ON_MSG(model_id > 1, "Invalid model ID. Model must be 0 (MobileNetV1_1.0_224) or 1 (MobileNetV1_0.75_160)");
- int layout_id = (argc > 3) ? std::strtol(argv[3], nullptr, 10) : 0;
- ARM_COMPUTE_ERROR_ON_MSG(layout_id > 1, "Invalid layout ID. Layout must be 0 (NCHW) or 1 (NHWC)");
-
- float depth_scale = (model_id == 0) ? 1.f : 0.75;
- unsigned int spatial_size = (model_id == 0) ? 224 : 160;
- std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
- TensorDescriptor input_descriptor_nchw = TensorDescriptor(TensorShape(spatial_size, spatial_size, 3U, 1U), DataType::F32);
- TensorDescriptor input_descriptor_nhwc = TensorDescriptor(TensorShape(3U, spatial_size, spatial_size, 1U), DataType::F32).set_layout(DataLayout::NHWC);
- TensorDescriptor input_descriptor = (layout_id == 0) ? input_descriptor_nchw : input_descriptor_nhwc;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- else if(argc == 3)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data layout provided: using NCHW\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 6)
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get model parameters
+ int model_id = model_id_opt->value();
+
+ // Create input descriptor
+ unsigned int spatial_size = (model_id == 0 || common_params.data_type == DataType::QASYMM8) ? 224 : 160;
+ TensorShape tensor_shape = TensorShape(spatial_size, spatial_size, 3U, 1U);
+ if(common_params.data_layout == DataLayout::NHWC)
{
- data_path = argv[4];
- image = argv[5];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
+ arm_compute::permute(tensor_shape, arm_compute::PermutationVector(2U, 0U, 1U));
}
- else if(argc == 7)
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ // Set graph hints
+ graph << common_params.target
+ << DepthwiseConvolutionMethod::OPTIMIZED_3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
+ << common_params.fast_math_hint;
+
+ // Create core graph
+ if(arm_compute::is_data_type_float(common_params.data_type))
{
- data_path = argv[4];
- image = argv[5];
- label = argv[6];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
+ create_graph_float(input_descriptor, model_id);
}
else
{
- data_path = argv[4];
- image = argv[5];
- label = argv[6];
- fast_math_hint = (std::strtol(argv[7], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ create_graph_qasymm(input_descriptor);
}
+ // Create common tail
+ graph << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
+ << SoftmaxLayer().set_name("Softmax")
+ << OutputLayer(get_output_accessor(common_params, 5));
+
+ // Finalize graph
+ GraphConfig config;
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
+ }
+ void do_run() override
+ {
+ // Run graph
+ graph.run();
+ }
+
+private:
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ SimpleOption<int> *model_id_opt{ nullptr };
+ CommonGraphParams common_params;
+ Stream graph;
+
+ void create_graph_float(TensorDescriptor &input_descriptor, int model_id)
+ {
+ float depth_scale = (model_id == 0) ? 1.f : 0.75;
+ std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
+
+ // Create a preprocessor object
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
// Add model path to data path
if(!data_path.empty())
{
data_path += model_path;
}
- graph << target_hint
- << depthwise_convolution_hint
- << fast_math_hint
- << InputLayer(input_descriptor,
- get_input_accessor(image, std::move(preprocessor), false))
+ graph << InputLayer(input_descriptor,
+ get_input_accessor(common_params, std::move(preprocessor), false))
<< ConvolutionLayer(
3U, 3U, 32U * depth_scale,
get_weights_accessor(data_path, "Conv2d_0_weights.npy", DataLayout::NCHW),
@@ -150,47 +155,122 @@ public:
0.001f)
.set_name("Conv2d_0/BatchNorm")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("Conv2d_0/Relu6");
- graph << get_dwsc_node(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
- graph << get_dwsc_node(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+ graph << get_dwsc_node_float(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("Logits/AvgPool_1a")
<< ConvolutionLayer(
1U, 1U, 1001U,
get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_weights.npy", DataLayout::NCHW),
get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_biases.npy"),
PadStrideInfo(1, 1, 0, 0))
- .set_name("Logits/Conv2d_1c_1x1")
- << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
- << SoftmaxLayer().set_name("Softmax")
- << OutputLayer(get_output_accessor(label, 5));
-
- // Finalize graph
- GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ .set_name("Logits/Conv2d_1c_1x1");
}
- void do_run() override
+
+ void create_graph_qasymm(TensorDescriptor &input_descriptor)
{
- // Run graph
- graph.run();
- }
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
-private:
- Stream graph{ 0, "MobileNetV1" };
+ // Quantization info taken from the AndroidNN QASYMM8 MobileNet example
+ const QuantizationInfo in_quant_info = QuantizationInfo(0.0078125f, 128);
+ const QuantizationInfo mid_quant_info = QuantizationInfo(0.0784313753247f, 128);
+
+ const std::vector<QuantizationInfo> conv_weights_quant_info =
+ {
+ QuantizationInfo(0.031778190285f, 156), // conv0
+ QuantizationInfo(0.00604454148561f, 66) // conv14
+ };
+
+ const std::vector<QuantizationInfo> depth_weights_quant_info =
+ {
+ QuantizationInfo(0.254282623529f, 129), // dwsc1
+ QuantizationInfo(0.12828284502f, 172), // dwsc2
+ QuantizationInfo(0.265911251307f, 83), // dwsc3
+ QuantizationInfo(0.0985597148538f, 30), // dwsc4
+ QuantizationInfo(0.0631204470992f, 54), // dwsc5
+ QuantizationInfo(0.0137207424268f, 141), // dwsc6
+ QuantizationInfo(0.0817828401923f, 125), // dwsc7
+ QuantizationInfo(0.0393880493939f, 164), // dwsc8
+ QuantizationInfo(0.211694166064f, 129), // dwsc9
+ QuantizationInfo(0.158015936613f, 103), // dwsc10
+ QuantizationInfo(0.0182712618262f, 137), // dwsc11
+ QuantizationInfo(0.0127998134121f, 134), // dwsc12
+ QuantizationInfo(0.299285322428f, 161) // dwsc13
+ };
- BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
- unsigned int conv_filt,
- PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
+ const std::vector<QuantizationInfo> point_weights_quant_info =
+ {
+ QuantizationInfo(0.0425766184926f, 129), // dwsc1
+ QuantizationInfo(0.0250773020089f, 94), // dwsc2
+ QuantizationInfo(0.015851572156f, 93), // dwsc3
+ QuantizationInfo(0.0167811904103f, 98), // dwsc4
+ QuantizationInfo(0.00951790809631f, 135), // dwsc5
+ QuantizationInfo(0.00999817531556f, 128), // dwsc6
+ QuantizationInfo(0.00590536883101f, 126), // dwsc7
+ QuantizationInfo(0.00576109671965f, 133), // dwsc8
+ QuantizationInfo(0.00830461271107f, 142), // dwsc9
+ QuantizationInfo(0.0152327232063f, 72), // dwsc10
+ QuantizationInfo(0.00741417845711f, 125), // dwsc11
+ QuantizationInfo(0.0135628981516f, 142), // dwsc12
+ QuantizationInfo(0.0338749065995f, 140) // dwsc13
+ };
+
+ graph << InputLayer(input_descriptor.set_quantization_info(in_quant_info),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/" + common_params.image))
+ << ConvolutionLayer(
+ 3U, 3U, 32U,
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_weights.npy"),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_bias.npy"),
+ PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR),
+ 1, conv_weights_quant_info.at(0), mid_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_1", 64U, PadStrideInfo(1U, 1U, 1U, 1U), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(0), point_weights_quant_info.at(0));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_2", 128U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(1),
+ point_weights_quant_info.at(1));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_3", 128U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(2),
+ point_weights_quant_info.at(2));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_4", 256U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(3),
+ point_weights_quant_info.at(3));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_5", 256U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(4),
+ point_weights_quant_info.at(4));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_6", 512U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(5),
+ point_weights_quant_info.at(5));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_7", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(6),
+ point_weights_quant_info.at(6));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_8", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(7),
+ point_weights_quant_info.at(7));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_9", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(8),
+ point_weights_quant_info.at(8));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_10", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(9),
+ point_weights_quant_info.at(9));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_11", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(10),
+ point_weights_quant_info.at(10));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_12", 1024U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(11),
+ point_weights_quant_info.at(11));
+ graph << get_dwsc_node_qasymm(data_path, "Conv2d_13", 1024U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(12),
+ point_weights_quant_info.at(12))
+ << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
+ << ConvolutionLayer(
+ 1U, 1U, 1001U,
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_weights.npy"),
+ get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_bias.npy"),
+ PadStrideInfo(1U, 1U, 0U, 0U), 1, conv_weights_quant_info.at(1));
+ }
+
+ BranchLayer get_dwsc_node_float(const std::string &data_path, std::string &&param_path,
+ unsigned int conv_filt,
+ PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
{
std::string total_path = param_path + "_";
SubStream sg(graph);
@@ -225,18 +305,36 @@ private:
return BranchLayer(std::move(sg));
}
+
+ BranchLayer get_dwsc_node_qasymm(const std::string &data_path, std::string &&param_path,
+ const unsigned int conv_filt,
+ PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info,
+ QuantizationInfo depth_weights_quant_info, QuantizationInfo point_weights_quant_info)
+ {
+ std::string total_path = "/cnn_data/mobilenet_qasymm8_model/" + param_path + "_";
+ SubStream sg(graph);
+
+ sg << DepthwiseConvolutionLayer(
+ 3U, 3U,
+ get_weights_accessor(data_path, total_path + "depthwise_weights.npy"),
+ get_weights_accessor(data_path, total_path + "depthwise_bias.npy"),
+ dwc_pad_stride_info, depth_weights_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f))
+ << ConvolutionLayer(
+ 1U, 1U, conv_filt,
+ get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
+ get_weights_accessor(data_path, total_path + "pointwise_bias.npy"),
+ conv_pad_stride_info, 1, point_weights_quant_info)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
+
+ return BranchLayer(std::move(sg));
+ }
};
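The model selection that used to be positional argument 2 is now an ordinary parser option: it is registered against the shared CommandLineParser in the constructor and read back in do_setup(), so it appears next to the common options in the help output. A condensed sketch of that pattern, taken from the constructor and do_setup() above (the --model-id spelling is an assumption inferred from the option name; the actual flag syntax is defined by the parser in utils/command_line/, which is not reproduced in this part of the patch):

    // Constructor: register the extra option on the shared parser (default 0 selects MobileNetV1_1.0_224).
    model_id_opt = cmd_parser.add_option<SimpleOption<int>>("model-id", 0);
    model_id_opt->set_help("Mobilenet model id (0: 1.0_224, else: 0.75_160)");

    // do_setup(), after cmd_parser.parse(argc, argv): read the parsed value back.
    const int model_id = model_id_opt->value(); // presumably supplied as --model-id=<N>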
/** Main program for MobileNetV1
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner),
- * [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160),
- * [optional] Path to the weights folder,
- * [optional] image,
- * [optional] labels,
- * [optional] data layout,
- * [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
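The one non-mechanical change in this example is the layout handling in do_setup() above: instead of keeping two hand-written descriptors, the NCHW-ordered shape is permuted with PermutationVector(2U, 0U, 1U) when NHWC is requested, and the result matches the NHWC shape the previous revision spelled out explicitly, i.e. (3, S, S, 1). A minimal standalone illustration of that reordering, assuming permute() assigns dimension i the value of dimension perm[i] (consistent with the shapes in the hunks above; this sketch uses std::array rather than the library types):

    #include <array>
    #include <cstdio>

    int main()
    {
        std::array<unsigned int, 4> shape{ 224U, 224U, 3U, 1U }; // NCHW ordering: W, H, C, N
        const std::array<unsigned int, 3> perm{ 2U, 0U, 1U };    // same values as PermutationVector(2U, 0U, 1U)

        const auto copy = shape;
        for(unsigned int i = 0; i < perm.size(); ++i)
        {
            shape[i] = copy[perm[i]]; // dimension i takes the value of dimension perm[i]
        }
        // Prints "3 224 224 1": the (C, W, H, N) ordering used for NHWC descriptors.
        std::printf("%u %u %u %u\n", shape[0], shape[1], shape[2], shape[3]);
        return 0;
    }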
diff --git a/examples/graph_mobilenet_qasymm8.cpp b/examples/graph_mobilenet_qasymm8.cpp
deleted file mode 100644
index 2801209985..0000000000
--- a/examples/graph_mobilenet_qasymm8.cpp
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphUtils.h"
-#include "utils/Utils.h"
-
-#include <cstdlib>
-
-using namespace arm_compute;
-using namespace arm_compute::utils;
-using namespace arm_compute::graph::frontend;
-using namespace arm_compute::graph_utils;
-
-/** Example demonstrating how to implement QASYMM8 MobileNet's network using the Compute Library's graph API
- *
- * @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] npy_input, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
- */
-class GraphMobileNetQASYMM8Example : public Example
-{
-public:
- void do_setup(int argc, char **argv) override
- {
- std::string data_path; /* Path to the trainable data */
- std::string input; /* Image data */
- std::string label; /* Label data */
-
- // Quantization info taken from the AndroidNN QASYMM8 MobileNet example
- const QuantizationInfo in_quant_info = QuantizationInfo(0.0078125f, 128);
- const QuantizationInfo mid_quant_info = QuantizationInfo(0.0784313753247f, 128);
-
- const std::vector<QuantizationInfo> conv_weights_quant_info =
- {
- QuantizationInfo(0.031778190285f, 156), // conv0
- QuantizationInfo(0.00604454148561f, 66) // conv14
- };
-
- const std::vector<QuantizationInfo> depth_weights_quant_info =
- {
- QuantizationInfo(0.254282623529f, 129), // dwsc1
- QuantizationInfo(0.12828284502f, 172), // dwsc2
- QuantizationInfo(0.265911251307f, 83), // dwsc3
- QuantizationInfo(0.0985597148538f, 30), // dwsc4
- QuantizationInfo(0.0631204470992f, 54), // dwsc5
- QuantizationInfo(0.0137207424268f, 141), // dwsc6
- QuantizationInfo(0.0817828401923f, 125), // dwsc7
- QuantizationInfo(0.0393880493939f, 164), // dwsc8
- QuantizationInfo(0.211694166064f, 129), // dwsc9
- QuantizationInfo(0.158015936613f, 103), // dwsc10
- QuantizationInfo(0.0182712618262f, 137), // dwsc11
- QuantizationInfo(0.0127998134121f, 134), // dwsc12
- QuantizationInfo(0.299285322428f, 161) // dwsc13
- };
-
- const std::vector<QuantizationInfo> point_weights_quant_info =
- {
- QuantizationInfo(0.0425766184926f, 129), // dwsc1
- QuantizationInfo(0.0250773020089f, 94), // dwsc2
- QuantizationInfo(0.015851572156f, 93), // dwsc3
- QuantizationInfo(0.0167811904103f, 98), // dwsc4
- QuantizationInfo(0.00951790809631f, 135), // dwsc5
- QuantizationInfo(0.00999817531556f, 128), // dwsc6
- QuantizationInfo(0.00590536883101f, 126), // dwsc7
- QuantizationInfo(0.00576109671965f, 133), // dwsc8
- QuantizationInfo(0.00830461271107f, 142), // dwsc9
- QuantizationInfo(0.0152327232063f, 72), // dwsc10
- QuantizationInfo(0.00741417845711f, 125), // dwsc11
- QuantizationInfo(0.0135628981516f, 142), // dwsc12
- QuantizationInfo(0.0338749065995f, 140) // dwsc13
- };
-
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
-
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [npy_input] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [npy_input] [labels] [fast_math_hint]\n\n";
- std::cout << "No input provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- input = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- input = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- input = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
- }
-
- graph << target_hint
- << DepthwiseConvolutionMethod::OPTIMIZED_3x3 // FIXME(COMPMID-1073): Add heuristics to automatically call the optimized 3x3 method
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::QASYMM8, in_quant_info),
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/" + input))
- << ConvolutionLayer(
- 3U, 3U, 32U,
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_weights.npy"),
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Conv2d_0_bias.npy"),
- PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR),
- 1, conv_weights_quant_info.at(0), mid_quant_info)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
- graph << get_dwsc_node(data_path, "Conv2d_1", 64U, PadStrideInfo(1U, 1U, 1U, 1U), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(0), point_weights_quant_info.at(0));
- graph << get_dwsc_node(data_path, "Conv2d_2", 128U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(1),
- point_weights_quant_info.at(1));
- graph << get_dwsc_node(data_path, "Conv2d_3", 128U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(2),
- point_weights_quant_info.at(2));
- graph << get_dwsc_node(data_path, "Conv2d_4", 256U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(3),
- point_weights_quant_info.at(3));
- graph << get_dwsc_node(data_path, "Conv2d_5", 256U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(4),
- point_weights_quant_info.at(4));
- graph << get_dwsc_node(data_path, "Conv2d_6", 512U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(5),
- point_weights_quant_info.at(5));
- graph << get_dwsc_node(data_path, "Conv2d_7", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(6),
- point_weights_quant_info.at(6));
- graph << get_dwsc_node(data_path, "Conv2d_8", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(7),
- point_weights_quant_info.at(7));
- graph << get_dwsc_node(data_path, "Conv2d_9", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(8),
- point_weights_quant_info.at(8));
- graph << get_dwsc_node(data_path, "Conv2d_10", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(9),
- point_weights_quant_info.at(9));
- graph << get_dwsc_node(data_path, "Conv2d_11", 512U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(10),
- point_weights_quant_info.at(10));
- graph << get_dwsc_node(data_path, "Conv2d_12", 1024U, PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(11),
- point_weights_quant_info.at(11));
- graph << get_dwsc_node(data_path, "Conv2d_13", 1024U, PadStrideInfo(1U, 1U, 1U, 1U, 1U, 1U, DimensionRoundingType::FLOOR), PadStrideInfo(1U, 1U, 0U, 0U), depth_weights_quant_info.at(12),
- point_weights_quant_info.at(12))
- << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
- << ConvolutionLayer(
- 1U, 1U, 1001U,
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_weights.npy"),
- get_weights_accessor(data_path, "/cnn_data/mobilenet_qasymm8_model/Logits_Conv2d_1c_1x1_bias.npy"),
- PadStrideInfo(1U, 1U, 0U, 0U), 1, conv_weights_quant_info.at(1))
- << ReshapeLayer(TensorShape(1001U))
- << SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
-
- // Finalize graph
- GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
- }
- void do_run() override
- {
- // Run graph
- graph.run();
- }
-
-private:
- Stream graph{ 0, "MobileNetV1_QASYMM8" };
-
- /** This function produces a depthwise separable convolution node (i.e. depthwise + pointwise layers) with ReLU6 activation after each layer.
- *
- * @param[in] data_path Path to trainable data folder
- * @param[in] param_path Prefix of specific set of weights/biases data
- * @param[in] conv_filt Filters depths for pointwise convolution
- * @param[in] dwc_pad_stride_info PadStrideInfo for depthwise convolution
- * @param[in] conv_pad_stride_info PadStrideInfo for pointwise convolution
- * @param[in] depth_weights_quant_info QuantizationInfo for depthwise convolution's weights
- * @param[in] point_weights_quant_info QuantizationInfo for pointwise convolution's weights
- *
- * @return The complete dwsc node
- */
- BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
- const unsigned int conv_filt,
- PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info,
- QuantizationInfo depth_weights_quant_info, QuantizationInfo point_weights_quant_info)
- {
- std::string total_path = "/cnn_data/mobilenet_qasymm8_model/" + param_path + "_";
- SubStream sg(graph);
-
- sg << DepthwiseConvolutionLayer(
- 3U, 3U,
- get_weights_accessor(data_path, total_path + "depthwise_weights.npy"),
- get_weights_accessor(data_path, total_path + "depthwise_bias.npy"),
- dwc_pad_stride_info, depth_weights_quant_info)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f))
- << ConvolutionLayer(
- 1U, 1U, conv_filt,
- get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
- get_weights_accessor(data_path, total_path + "pointwise_bias.npy"),
- conv_pad_stride_info, 1, point_weights_quant_info)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
-
- return BranchLayer(std::move(sg));
- }
-};
-/** Main program for MobileNetQASYMM8
- *
- * @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] npy_input, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
- */
-int main(int argc, char **argv)
-{
- return arm_compute::utils::run_example<GraphMobileNetQASYMM8Example>(argc, argv);
-}
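With the standalone QASYMM8 example removed, the quantized MobileNet is now reached through graph_mobilenet.cpp above: the data type chosen on the command line selects which body gets built, while the classifier tail and the finalization stay shared. Condensed from the hunks above (a fragment, not a complete program):

    // do_setup() in graph_mobilenet.cpp, after the common parameters have been consumed:
    if(arm_compute::is_data_type_float(common_params.data_type))
    {
        create_graph_float(input_descriptor, model_id); // weights under /cnn_data/mobilenet_v1_*_model/
    }
    else
    {
        create_graph_qasymm(input_descriptor);          // weights under /cnn_data/mobilenet_qasymm8_model/
    }
    graph << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
          << SoftmaxLayer().set_name("Softmax")
          << OutputLayer(get_output_accessor(common_params, 5));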
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index bafa9a5852..66fc6e869d 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -23,11 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,72 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement ResNet50 network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphResNet50Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphResNet50Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNet50")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb,
- false /* Do not convert to BGR */);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
+ // Return when help menu is requested
+ if(common_params.help)
{
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb,
+ false /* Do not convert to BGR */);
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
<< ConvolutionLayer(
7U, 7U, 64U,
get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_weights.npy"),
@@ -131,12 +105,15 @@ public:
.set_name("logits/convolution")
<< FlattenLayer().set_name("predictions/Reshape")
<< SoftmaxLayer().set_name("predictions/Softmax")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
@@ -146,7 +123,10 @@ public:
}
private:
- Stream graph{ 0, "ResNet50" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
void add_residual_block(const std::string &data_path, const std::string &name, unsigned int base_depth, unsigned int num_units, unsigned int stride)
{
@@ -252,7 +232,7 @@ private:
/** Main program for ResNet50
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
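The ResNet50 rework above follows the same skeleton as every other example touched by this patch, so it is worth spelling out once. A condensed sketch of that skeleton (GraphExample and "ExampleName" are placeholders; member and helper names are as they appear in the hunks, and everything between the help check and finalization is elided):

    class GraphExample : public Example
    {
    public:
        GraphExample()
            : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ExampleName")
        {
        }
        bool do_setup(int argc, char **argv) override
        {
            cmd_parser.parse(argc, argv);                                 // replaces the hand-rolled argv handling
            common_params = consume_common_graph_parameters(common_opts); // fills CommonGraphParams
            if(common_params.help)
            {
                cmd_parser.print_help(argv[0]);
                return false;                                             // setup now reports success/failure
            }
            // ... build the network from common_params, finalize the graph ...
            return true;
        }
        void do_run() override
        {
            graph.run();
        }

    private:
        CommandLineParser cmd_parser;
        CommonGraphOptions common_opts;
        CommonGraphParams common_params;
        Stream graph;
    };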
diff --git a/examples/graph_resnext50.cpp b/examples/graph_resnext50.cpp
index f96a02e6d6..c0a2308a1f 100644
--- a/examples/graph_resnext50.cpp
+++ b/examples/graph_resnext50.cpp
@@ -23,11 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,67 +34,43 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement ResNeXt50 network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] npy_in, [optional] npy_out, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphResNeXt50Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphResNeXt50Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNeXt50")
+ {
+ }
+ bool do_setup(int argc, char **argv) override
{
- std::string data_path; /* Path to the trainable data */
- std::string npy_in; /* Input npy data */
- std::string npy_out; /* Output npy data */
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [npy_in] [npy_out] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [npy_in] [npy_out] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [npy_in] [npy_out] [fast_math_hint]\n\n";
- std::cout << "No input npy file provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- npy_in = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [npy_out] [fast_math_hint]\n\n";
- std::cout << "No output npy file provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- npy_in = argv[3];
- npy_out = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- npy_in = argv[3];
- npy_out = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(npy_in))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params))
<< ScaleLayer(get_weights_accessor(data_path, "/cnn_data/resnext50_model/bn_data_mul.npy"),
get_weights_accessor(data_path, "/cnn_data/resnext50_model/bn_data_add.npy"))
.set_name("bn_data/Scale")
@@ -115,12 +90,15 @@ public:
graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("pool1")
<< FlattenLayer().set_name("predictions/Reshape")
- << OutputLayer(get_npy_output_accessor(npy_out, TensorShape(2048U), DataType::F32));
+ << OutputLayer(get_npy_output_accessor(common_params.labels, TensorShape(2048U), DataType::F32));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
@@ -130,7 +108,10 @@ public:
}
private:
- Stream graph{ 0, "ResNeXt50" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
void add_residual_block(const std::string &data_path, unsigned int base_depth, unsigned int stage, unsigned int num_units, unsigned int stride_conv_unit1)
{
@@ -200,7 +181,7 @@ private:
/** Main program for ResNeXt50
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [[optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] npy_in, [optional] npy_out )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
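Unlike the classification examples, ResNeXt50 writes out a raw 2048-element activation rather than top-5 labels; after this change the destination .npy path travels in the labels slot of the common parameters, as the hunk above shows. Condensed:

    // graph_resnext50.cpp: the path formerly passed as npy_out now arrives through common_params.labels.
    graph << OutputLayer(get_npy_output_accessor(common_params.labels, TensorShape(2048U), DataType::F32));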
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index b632688839..a290b91148 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -23,85 +23,58 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
-using namespace arm_compute::logging;
/** Example demonstrating how to implement Squeezenet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphSqueezenetExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphSqueezenetExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "SqueezeNetV1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
<< ConvolutionLayer(
7U, 7U, 96U,
get_weights_accessor(data_path, "/cnn_data/squeezenet_v1.0_model/conv1_w.npy"),
@@ -176,12 +149,15 @@ public:
<< PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -190,7 +166,10 @@ public:
}
private:
- Stream graph{ 0, "SqueezeNetV1" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
{
@@ -218,7 +197,7 @@ private:
/** Main program for Squeezenet v1.0
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
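All of these examples read the same handful of fields off CommonGraphParams. The struct itself is defined in utils/CommonGraphOptions.h, which this commit adds but which is not reproduced in this portion of the diff, so the following is only a plausible shape inferred from the call sites above: the field names match the usages in the hunks, while the exact types and defaults are assumptions (the defaults shown mirror the behaviour of the old positional arguments).

    // Inferred sketch; the authoritative definition lives in utils/CommonGraphOptions.h.
    struct CommonGraphParams
    {
        bool                             help{ false };
        int                              threads{ 0 };
        arm_compute::graph::Target       target{ arm_compute::graph::Target::NEON };
        arm_compute::DataType            data_type{ arm_compute::DataType::F32 };
        arm_compute::DataLayout          data_layout{ arm_compute::DataLayout::NCHW };
        bool                             enable_tuner{ false };
        arm_compute::graph::FastMathHint fast_math_hint{ arm_compute::graph::FastMathHint::DISABLED };
        std::string                      data_path{};  // weights folder; empty means random values
        std::string                      image{};      // input image / npy
        std::string                      labels{};     // labels file (or output npy for ResNeXt50)
        // ... plus whatever validation-related fields the header defines ...
    };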
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index b2c5a442cd..8ce928c5b1 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -23,12 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-#include <tuple>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -36,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement Squeezenet's v1.1 network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphSqueezenet_v1_1Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphSqueezenet_v1_1Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "SqueezeNetV1.1")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
<< ConvolutionMethod::DIRECT
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -177,12 +151,15 @@ public:
<< PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -191,7 +168,10 @@ public:
}
private:
- Stream graph{ 0, "SqueezeNetV1.1" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
{
@@ -219,7 +199,7 @@ private:
/** Main program for Squeezenet v1.1
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index d70c56eadd..5ff306507f 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -23,11 +23,10 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
@@ -35,71 +34,47 @@ using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement VGG16's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphVGG16Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphVGG16Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "VGG16")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
+ // Return when help menu is requested
+ if(common_params.help)
{
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
- {
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -231,12 +206,15 @@ public:
.set_name("fc8")
// Softmax
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -245,13 +223,16 @@ public:
}
private:
- Stream graph{ 0, "VGG16" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for VGG16
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
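The hunks above convert the VGG16 example from positional argv parsing to the shared CommonGraphOptions flow: construct the parser and options, parse, consume the parameters, and return false from do_setup() when --help is requested. Below is a minimal sketch of that skeleton; GraphSkeletonExample, its deliberately tiny graph, the defaulted get_input_accessor() call and the main() wrapper are illustrative assumptions modelled on the hunks above, not code from this patch.

#include "arm_compute/graph.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

class GraphSkeletonExample : public Example
{
public:
    GraphSkeletonExample()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "Skeleton")
    {
    }
    bool do_setup(int argc, char **argv) override
    {
        // Parse the command line and consume the shared graph parameters
        cmd_parser.parse(argc, argv);
        common_params = consume_common_graph_parameters(common_opts);

        // Returning false skips do_run()
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Build a (deliberately tiny) graph from the common parameters
        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
                            get_input_accessor(common_params))
              << SoftmaxLayer()
              << OutputLayer(get_output_accessor(common_params, 5));

        GraphConfig config;
        config.num_threads = common_params.threads;
        config.use_tuner   = common_params.enable_tuner;
        graph.finalize(common_params.target, config);

        return true;
    }
    void do_run() override
    {
        graph.run();
    }

private:
    CommandLineParser  cmd_parser;
    CommonGraphOptions common_opts;
    CommonGraphParams  common_params;
    Stream             graph;
};

int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphSkeletonExample>(argc, argv);
}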
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 8a0ec6fdbd..8bf88b96ed 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -23,83 +23,57 @@
*/
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
-#include <cstdlib>
-
using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;
-
/** Example demonstrating how to implement VGG19's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
class GraphVGG19Example : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ GraphVGG19Example()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "VGG19")
{
- std::string data_path; /* Path to the trainable data */
- std::string image; /* Image data */
- std::string label; /* Label data */
-
- // Create a preprocessor object
- const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint(target);
- FastMathHint fast_math_hint = FastMathHint::DISABLED;
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
- // Parse arguments
- if(argc < 2)
- {
- // Print help
- std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 2)
- {
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No data folder provided: using random values\n\n";
- }
- else if(argc == 3)
+ // Return when help menu is requested
+ if(common_params.help)
{
- data_path = argv[2];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
- std::cout << "No image provided: using random values\n\n";
- }
- else if(argc == 4)
- {
- data_path = argv[2];
- image = argv[3];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
- std::cout << "No text file with labels provided: skipping output accessor\n\n";
- }
- else if(argc == 5)
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
- std::cout << "No fast math info provided: disabling fast math\n\n";
- }
- else
- {
- data_path = argv[2];
- image = argv[3];
- label = argv[4];
- fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::DISABLED : FastMathHint::ENABLED;
+ cmd_parser.print_help(argv[0]);
+ return false;
}
- graph << target_hint
- << fast_math_hint
- << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ // Checks
+ ARM_COMPUTE_ERROR_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "Unsupported data type!");
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Create a preprocessor object
+ const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
+ std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), common_params.data_type),
+ get_input_accessor(common_params, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -244,12 +218,15 @@ public:
.set_name("fc8")
// Softmax
<< SoftmaxLayer().set_name("prob")
- << OutputLayer(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(common_params, 5));
// Finalize graph
GraphConfig config;
- config.use_tuner = (target == 2);
- graph.finalize(target_hint, config);
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ graph.finalize(common_params.target, config);
+
+ return true;
}
void do_run() override
{
@@ -258,13 +235,16 @@ public:
}
private:
- Stream graph{ 0, "VGG19" };
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
};
/** Main program for VGG19
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ * @param[in] argv Arguments
*/
int main(int argc, char **argv)
{
diff --git a/examples/neon_cartoon_effect.cpp b/examples/neon_cartoon_effect.cpp
index e6e0f34154..4285aa41e3 100644
--- a/examples/neon_cartoon_effect.cpp
+++ b/examples/neon_cartoon_effect.cpp
@@ -34,7 +34,7 @@ using namespace utils;
class NEONCartoonEffectExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
// Open PPM file
PPMLoader ppm;
@@ -75,6 +75,8 @@ public:
ppm.fill_image(src_img);
output_filename = std::string(argv[1]) + "_out.ppm";
}
+
+ return true;
}
void do_run() override
diff --git a/examples/neon_cnn.cpp b/examples/neon_cnn.cpp
index 05b6c832bc..1df81256b9 100644
--- a/examples/neon_cnn.cpp
+++ b/examples/neon_cnn.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ using namespace utils;
class NEONCNNExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
ARM_COMPUTE_UNUSED(argc);
ARM_COMPUTE_UNUSED(argv);
@@ -227,6 +227,8 @@ public:
// Finalize the manager. (Validity checks, memory allocations etc)
mm_transitions->finalize();
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neon_convolution.cpp b/examples/neon_convolution.cpp
index 8efb932081..1a7e865908 100644
--- a/examples/neon_convolution.cpp
+++ b/examples/neon_convolution.cpp
@@ -53,7 +53,7 @@ const int16_t gaussian5x5[] =
class NEONConvolutionExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
/** [Accurate padding] **/
PPMLoader ppm;
@@ -94,6 +94,8 @@ public:
output_filename = std::string(argv[1]) + "_out.ppm";
}
/** [Accurate padding] **/
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neon_copy_objects.cpp b/examples/neon_copy_objects.cpp
index 9409cf366c..84a2abd379 100644
--- a/examples/neon_copy_objects.cpp
+++ b/examples/neon_copy_objects.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ using namespace utils;
class NEONCopyObjectsExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
ARM_COMPUTE_UNUSED(argc);
ARM_COMPUTE_UNUSED(argv);
@@ -135,6 +135,8 @@ public:
output_it);
/** [Copy objects example] */
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neon_scale.cpp b/examples/neon_scale.cpp
index 252bfc9ae7..b04d916aaf 100644
--- a/examples/neon_scale.cpp
+++ b/examples/neon_scale.cpp
@@ -33,7 +33,7 @@ using namespace utils;
class NEONScaleExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
PPMLoader ppm;
@@ -72,6 +72,8 @@ public:
ppm.fill_image(src);
output_filename = std::string(argv[1]) + "_out.ppm";
}
+
+ return true;
}
void do_run() override
{
diff --git a/examples/neoncl_scale_median_gaussian.cpp b/examples/neoncl_scale_median_gaussian.cpp
index 173575c061..1b26517d9f 100644
--- a/examples/neoncl_scale_median_gaussian.cpp
+++ b/examples/neoncl_scale_median_gaussian.cpp
@@ -43,7 +43,7 @@ using namespace utils;
class NEONCLScaleMedianGaussianExample : public Example
{
public:
- void do_setup(int argc, char **argv) override
+ bool do_setup(int argc, char **argv) override
{
/** [NEON / OpenCL Interop] */
PPMLoader ppm;
@@ -88,6 +88,8 @@ public:
const std::string output_filename = std::string(argv[1]) + "_out.ppm";
}
/** [NEON / OpenCL Interop] */
+
+ return true;
}
void do_run() override
{
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index e1ffeed668..ed24f18943 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -41,9 +41,9 @@ bool Graph::remove_node(NodeID nid)
std::unique_ptr<INode> &node = _nodes[nid];
- // Remove node connections
if(node)
{
+ // Remove node connections
for(auto &input_eid : node->_input_edges)
{
remove_connection(input_eid);
@@ -52,6 +52,10 @@ bool Graph::remove_node(NodeID nid)
{
remove_connection(outpud_eid);
}
+
+ // Remove nid from tagged nodes
+ std::vector<NodeID> &tnodes = _tagged_nodes.at(node->type());
+ tnodes.erase(std::remove(tnodes.begin(), tnodes.end(), nid), tnodes.end());
}
node = nullptr;
@@ -164,9 +168,9 @@ GraphID Graph::id() const
return _id;
}
-const std::vector<NodeID> &Graph::inputs()
+const std::vector<NodeID> &Graph::nodes(NodeType type)
{
- return _tagged_nodes[NodeType::Input];
+ return _tagged_nodes[type];
}
std::vector<std::unique_ptr<INode>> &Graph::nodes()
diff --git a/src/graph/GraphManager.cpp b/src/graph/GraphManager.cpp
index 10661ea275..db6650cf69 100644
--- a/src/graph/GraphManager.cpp
+++ b/src/graph/GraphManager.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/PassManager.h"
+#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h"
#include "arm_compute/graph/detail/ExecutionHelpers.h"
@@ -53,7 +54,12 @@ void GraphManager::finalize_graph(Graph &graph, GraphContext &ctx, PassManager &
// Force target to all graph construct
// TODO (geopin01) : Support heterogeneous execution
- Target forced_target = is_target_supported(target) ? target : get_default_target();
+ Target forced_target = target;
+ if(!is_target_supported(target))
+ {
+ forced_target = get_default_target();
+ ARM_COMPUTE_LOG_GRAPH_INFO("Switching target from " << target << " to " << forced_target << std::endl);
+ }
force_target_to_graph(graph, forced_target);
// Configure all tensors
@@ -103,14 +109,23 @@ void GraphManager::execute_graph(Graph &graph)
auto it = _workloads.find(graph.id());
ARM_COMPUTE_ERROR_ON_MSG(it == std::end(_workloads), "Graph is not registered!");
- // Call input accessors
- detail::call_all_input_node_accessors(it->second);
-
- // Run graph
- detail::call_all_tasks(it->second);
-
- // Call output accessors
- detail::call_all_output_node_accessors(it->second);
+ while(true)
+ {
+ // Call input accessors
+ if(!detail::call_all_input_node_accessors(it->second))
+ {
+ return;
+ }
+
+ // Run graph
+ detail::call_all_tasks(it->second);
+
+ // Call output accessors
+ if(!detail::call_all_output_node_accessors(it->second))
+ {
+ return;
+ }
+ }
}
void GraphManager::invalidate_graph(Graph &graph)
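The execute_graph() change above replaces a single pass with a loop that keeps running the workload until one of the accessors signals completion, which is what lets the validation accessors stream a whole image list through the same graph. A minimal standalone sketch of that control flow (the lambdas and run_streamed are placeholders, not library symbols):

#include <functional>
#include <iostream>

// Models the new execute_graph() loop: input and output accessors return
// false to stop streamed execution (e.g. when the image list is exhausted).
void run_streamed(const std::function<bool()> &feed_inputs,
                  const std::function<void()> &run_tasks,
                  const std::function<bool()> &drain_outputs)
{
    while(true)
    {
        if(!feed_inputs())
        {
            return; // No more input data: stop streaming
        }
        run_tasks();
        if(!drain_outputs())
        {
            return; // Output accessor requested termination
        }
    }
}

int main()
{
    int remaining = 3;
    run_streamed([&]() { return remaining-- > 0; },
                 []() { std::cout << "running graph\n"; },
                 []() { return true; });
}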
diff --git a/src/graph/Tensor.cpp b/src/graph/Tensor.cpp
index ef253feb2c..98501280af 100644
--- a/src/graph/Tensor.cpp
+++ b/src/graph/Tensor.cpp
@@ -90,12 +90,12 @@ bool Tensor::call_accessor()
}
// Call accessor
- _accessor->access_tensor(_handle->tensor());
+ bool retval = _accessor->access_tensor(_handle->tensor());
// Unmap tensor
_handle->unmap();
- return true;
+ return retval;
}
void Tensor::bind_edge(EdgeID eid)
diff --git a/src/graph/TypeLoader.cpp b/src/graph/TypeLoader.cpp
new file mode 100644
index 0000000000..30a3546821
--- /dev/null
+++ b/src/graph/TypeLoader.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/TypeLoader.h"
+
+#include "arm_compute/core/utils/misc/Utility.h"
+
+#include <map>
+
+namespace arm_compute
+{
+arm_compute::DataType data_type_from_name(const std::string &name)
+{
+ static const std::map<std::string, arm_compute::DataType> data_types =
+ {
+ { "f16", DataType::F16 },
+ { "f32", DataType::F32 },
+ { "qasymm8", DataType::QASYMM8 },
+ };
+
+ try
+ {
+ return data_types.at(arm_compute::utility::tolower(name));
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(name);
+ }
+}
+
+arm_compute::DataLayout data_layout_from_name(const std::string &name)
+{
+ static const std::map<std::string, arm_compute::DataLayout> data_layouts =
+ {
+ { "nhwc", DataLayout::NHWC },
+ { "nchw", DataLayout::NCHW },
+ };
+
+ try
+ {
+ return data_layouts.at(arm_compute::utility::tolower(name));
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(name);
+ }
+}
+namespace graph
+{
+Target target_from_name(const std::string &name)
+{
+ static const std::map<std::string, Target> targets =
+ {
+ { "neon", Target::NEON },
+ { "cl", Target::CL },
+ { "gles", Target::GC },
+ };
+
+ try
+ {
+ return targets.at(arm_compute::utility::tolower(name));
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(name);
+ }
+}
+} // namespace graph
+} // namespace arm_compute
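The three loaders above map lower-cased tokens to enum values and turn an unknown token into std::invalid_argument, so callers can convert user-supplied strings into typed graph parameters. A small usage sketch, assuming the three functions are declared in arm_compute/graph/TypeLoader.h as included by CommonGraphOptions.cpp later in this patch:

#include "arm_compute/graph/TypeLoader.h"

#include <iostream>
#include <stdexcept>

int main()
{
    try
    {
        // Lookup is case-insensitive; unknown names throw std::invalid_argument
        const auto type   = arm_compute::data_type_from_name("F16");
        const auto layout = arm_compute::data_layout_from_name("nhwc");
        const auto target = arm_compute::graph::target_from_name("cl");
        (void)type;
        (void)layout;
        (void)target;
        std::cout << "All tokens recognised\n";
    }
    catch(const std::invalid_argument &e)
    {
        std::cerr << "Unknown token: " << e.what() << "\n";
        return 1;
    }
    return 0;
}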
diff --git a/src/graph/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
index d68092a2e7..6df67fcfec 100644
--- a/src/graph/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -206,15 +206,12 @@ void call_all_const_node_accessors(Graph &g)
}
}
-void call_all_input_node_accessors(ExecutionWorkload &workload)
+bool call_all_input_node_accessors(ExecutionWorkload &workload)
{
- for(auto &input : workload.inputs)
+ return !std::any_of(std::begin(workload.inputs), std::end(workload.inputs), [](Tensor * input_tensor)
{
- if(input != nullptr)
- {
- input->call_accessor();
- }
- }
+ return (input_tensor == nullptr) || !input_tensor->call_accessor();
+ });
}
void prepare_all_tasks(ExecutionWorkload &workload)
@@ -256,15 +253,15 @@ void call_all_tasks(ExecutionWorkload &workload)
}
}
-void call_all_output_node_accessors(ExecutionWorkload &workload)
+bool call_all_output_node_accessors(ExecutionWorkload &workload)
{
- for(auto &output : workload.outputs)
+ bool is_valid = true;
+ std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
{
- if(output != nullptr)
- {
- output->call_accessor();
- }
- }
+ is_valid = is_valid && (output_tensor != nullptr) && output_tensor->call_accessor();
+ });
+
+ return is_valid;
}
} // namespace detail
} // namespace graph
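Note the asymmetry in the two helpers above: the input path short-circuits on the first accessor that reports no more data, while the output path still visits every output and AND-combines the results. A standalone illustration of the two idioms (plain bools stand in for the tensor accessors):

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    std::vector<bool> inputs  = { true, false, true };
    std::vector<bool> outputs = { true, true, false };

    // "No input failed": std::any_of stops at the first failure
    const bool inputs_ok = !std::any_of(inputs.begin(), inputs.end(),
                                        [](bool ok) { return !ok; });

    // All outputs are visited; validity is accumulated with AND
    bool outputs_ok = true;
    std::for_each(outputs.begin(), outputs.end(),
                  [&](bool ok) { outputs_ok = outputs_ok && ok; });

    std::cout << std::boolalpha << inputs_ok << " " << outputs_ok << "\n"; // false false
}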
diff --git a/tests/SConscript b/tests/SConscript
index a58e1f2ea9..4465080d5b 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -198,17 +198,17 @@ if test_env['benchmark_examples']:
# Graph examples
graph_utils = test_env.Object(source="../utils/GraphUtils.cpp", target="GraphUtils")
+ graph_params = test_env.Object(source="../utils/CommonGraphOptions.cpp", target="CommonGraphOptions")
for file in Glob("../examples/graph_*.cpp"):
example = "benchmark_" + os.path.basename(os.path.splitext(str(file))[0])
if env['os'] in ['android', 'bare_metal'] or env['standalone']:
- prog = test_env.Program(example, [ test_env.Object(source=file, target=example), graph_utils]+ files_benchmark_examples, LIBS = test_env["LIBS"], LINKFLAGS=test_env["LINKFLAGS"]+['-Wl,--whole-archive',arm_compute_lib,'-Wl,--no-whole-archive'])
+ prog = test_env.Program(example, [ test_env.Object(source=file, target=example), graph_utils, graph_params]+ files_benchmark_examples, LIBS = test_env["LIBS"], LINKFLAGS=test_env["LINKFLAGS"]+['-Wl,--whole-archive',arm_compute_lib,'-Wl,--no-whole-archive'])
arm_compute_benchmark_examples += [ prog ]
else:
#-Wl,--allow-shlib-undefined: Ignore dependencies of dependencies
- prog = test_env.Program(example, [ test_env.Object(source=file, target=example), graph_utils]+ files_benchmark_examples, LIBS = test_env["LIBS"] + ["arm_compute_graph"], LINKFLAGS=test_env["LINKFLAGS"]+['-Wl,--allow-shlib-undefined'] )
+ prog = test_env.Program(example, [ test_env.Object(source=file, target=example), graph_utils, graph_params]+ files_benchmark_examples, LIBS = test_env["LIBS"] + ["arm_compute_graph"], LINKFLAGS=test_env["LINKFLAGS"]+['-Wl,--allow-shlib-undefined'] )
arm_compute_benchmark_examples += [ prog ]
Depends(arm_compute_benchmark_examples, arm_compute_test_framework)
Depends(arm_compute_benchmark_examples, arm_compute_lib)
Default(arm_compute_benchmark_examples)
Export('arm_compute_benchmark_examples')
-
diff --git a/tests/benchmark_examples/RunExample.cpp b/tests/benchmark_examples/RunExample.cpp
index f6a9742048..05430e7628 100644
--- a/tests/benchmark_examples/RunExample.cpp
+++ b/tests/benchmark_examples/RunExample.cpp
@@ -30,9 +30,9 @@
#include "arm_compute/runtime/Scheduler.h"
#include "tests/framework/Framework.h"
#include "tests/framework/Macros.h"
-#include "tests/framework/command_line/CommandLineParser.h"
#include "tests/framework/command_line/CommonOptions.h"
#include "tests/framework/instruments/Instruments.h"
+#include "utils/command_line/CommandLineParser.h"
#ifdef ARM_COMPUTE_CL
#include "arm_compute/runtime/CL/CLScheduler.h"
@@ -74,9 +74,9 @@ public:
int run_example(int argc, char **argv, std::unique_ptr<Example> example)
{
- framework::CommandLineParser parser;
- framework::CommonOptions options(parser);
- auto example_args = parser.add_option<framework::ListOption<std::string>>("example_args");
+ utils::CommandLineParser parser;
+ framework::CommonOptions options(parser);
+ auto example_args = parser.add_option<utils::ListOption<std::string>>("example_args");
example_args->set_help("Arguments to pass to the example separated by commas (e.g: arg0,arg1,arg2)");
framework::Framework &framework = framework::Framework::get();
diff --git a/tests/framework/command_line/CommandLineParser.h b/tests/framework/command_line/CommandLineParser.h
deleted file mode 100644
index adb5214e2f..0000000000
--- a/tests/framework/command_line/CommandLineParser.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_COMMANDLINEPARSER
-#define ARM_COMPUTE_TEST_COMMANDLINEPARSER
-
-#include "../Utils.h"
-#include "Option.h"
-
-#include <map>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace framework
-{
-/** Class to parse command line arguments. */
-class CommandLineParser final
-{
-public:
- /** Default constructor. */
- CommandLineParser() = default;
-
- /** Function to add a new option to the parser.
- *
- * @param[in] name Name of the option. Will be available under --name=VALUE.
- * @param[in] args Option specific configuration arguments.
- *
- * @return Pointer to the option. The option is owned by the parser.
- */
- template <typename T, typename... As>
- T *add_option(const std::string &name, As &&... args);
-
- /** Function to add a new positional argument to the parser.
- *
- * @param[in] args Option specific configuration arguments.
- *
- * @return Pointer to the option. The option is owned by the parser.
- */
- template <typename T, typename... As>
- T *add_positional_option(As &&... args);
-
- /** Parses the command line arguments and updates the options accordingly.
- *
- * @param[in] argc Number of arguments.
- * @param[in] argv Arguments.
- */
- void parse(int argc, char **argv);
-
- /** Validates the previously parsed command line arguments.
- *
- * Validation fails if not all required options are provided. Additionally
- * warnings are generated for options that have illegal values or unknown
- * options.
- *
- * @return True if all required options have been provided.
- */
- bool validate() const;
-
- /** Prints a help message for all configured options.
- *
- * @param[in] program_name Name of the program to be used in the help message.
- */
- void print_help(const std::string &program_name) const;
-
-private:
- using OptionsMap = std::map<std::string, std::unique_ptr<Option>>;
- using PositionalOptionsVector = std::vector<std::unique_ptr<Option>>;
-
- OptionsMap _options{};
- PositionalOptionsVector _positional_options{};
- std::vector<std::string> _unknown_options{};
- std::vector<std::string> _invalid_options{};
-};
-
-template <typename T, typename... As>
-inline T *CommandLineParser::add_option(const std::string &name, As &&... args)
-{
- auto result = _options.emplace(name, support::cpp14::make_unique<T>(name, std::forward<As>(args)...));
- return static_cast<T *>(result.first->second.get());
-}
-
-template <typename T, typename... As>
-inline T *CommandLineParser::add_positional_option(As &&... args)
-{
- _positional_options.emplace_back(support::cpp14::make_unique<T>(std::forward<As>(args)...));
- return static_cast<T *>(_positional_options.back().get());
-}
-} // namespace framework
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_COMMANDLINEPARSER */
diff --git a/tests/framework/command_line/CommonOptions.cpp b/tests/framework/command_line/CommonOptions.cpp
index e0b93f6830..f1c140e6fe 100644
--- a/tests/framework/command_line/CommonOptions.cpp
+++ b/tests/framework/command_line/CommonOptions.cpp
@@ -25,9 +25,10 @@
#include "../Framework.h"
#include "../printers/Printers.h"
-#include "CommandLineParser.h"
#include <unistd.h>
+using namespace arm_compute::utils;
+
namespace arm_compute
{
namespace test
diff --git a/tests/framework/command_line/CommonOptions.h b/tests/framework/command_line/CommonOptions.h
index 651316c557..b29c1d8dd5 100644
--- a/tests/framework/command_line/CommonOptions.h
+++ b/tests/framework/command_line/CommonOptions.h
@@ -25,7 +25,10 @@
#define ARM_COMPUTE_TEST_COMMONOPTIONS
#include "../instruments/Instruments.h"
-#include "CommandLineOptions.h"
+
+#include "utils/command_line/CommandLineOptions.h"
+#include "utils/command_line/CommandLineParser.h"
+
#include <memory>
namespace arm_compute
@@ -34,7 +37,6 @@ namespace test
{
namespace framework
{
-class CommandLineParser;
class Printer;
enum class LogFormat;
enum class LogLevel;
@@ -56,7 +58,7 @@ public:
*
* @param[in,out] parser A parser on which "parse()" hasn't been called yet.
*/
- CommonOptions(CommandLineParser &parser);
+ CommonOptions(arm_compute::utils::CommandLineParser &parser);
/** Prevent instances of this class from being copy constructed */
CommonOptions(const CommonOptions &) = delete;
/** Prevent instances of this class from being copied */
@@ -69,19 +71,19 @@ public:
*/
std::vector<std::unique_ptr<Printer>> create_printers();
- ToggleOption *help; /**< Show help option */
- EnumListOption<InstrumentsDescription> *instruments; /**< Instruments option */
- SimpleOption<int> *iterations; /**< Number of iterations option */
- SimpleOption<int> *threads; /**< Number of threads option */
- EnumOption<LogFormat> *log_format; /**< Log format option */
- SimpleOption<std::string> *log_file; /**< Log file option */
- EnumOption<LogLevel> *log_level; /**< Logging level option */
- ToggleOption *throw_errors; /**< Throw errors option */
- ToggleOption *color_output; /**< Color output option */
- ToggleOption *pretty_console; /**< Pretty console option */
- SimpleOption<std::string> *json_file; /**< JSON output file option */
- SimpleOption<std::string> *pretty_file; /**< Pretty output file option */
- std::vector<std::shared_ptr<std::ofstream>> log_streams; /**< Log streams */
+ arm_compute::utils::ToggleOption *help; /**< Show help option */
+ arm_compute::utils::EnumListOption<InstrumentsDescription> *instruments; /**< Instruments option */
+ arm_compute::utils::SimpleOption<int> *iterations; /**< Number of iterations option */
+ arm_compute::utils::SimpleOption<int> *threads; /**< Number of threads option */
+ arm_compute::utils::EnumOption<LogFormat> *log_format; /**< Log format option */
+ arm_compute::utils::SimpleOption<std::string> *log_file; /**< Log file option */
+ arm_compute::utils::EnumOption<LogLevel> *log_level; /**< Logging level option */
+ arm_compute::utils::ToggleOption *throw_errors; /**< Throw errors option */
+ arm_compute::utils::ToggleOption *color_output; /**< Color output option */
+ arm_compute::utils::ToggleOption *pretty_console; /**< Pretty console option */
+ arm_compute::utils::SimpleOption<std::string> *json_file; /**< JSON output file option */
+ arm_compute::utils::SimpleOption<std::string> *pretty_file; /**< Pretty output file option */
+ std::vector<std::shared_ptr<std::ofstream>> log_streams; /**< Log streams */
};
} // namespace framework
diff --git a/tests/framework/command_line/Option.cpp b/tests/framework/command_line/Option.cpp
deleted file mode 100644
index d60c35a698..0000000000
--- a/tests/framework/command_line/Option.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "Option.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace framework
-{
-Option::Option(std::string name)
- : _name{ std::move(name) }
-{
-}
-
-Option::Option(std::string name, bool is_required, bool is_set)
- : _name{ std::move(name) }, _is_required{ is_required }, _is_set{ is_set }
-{
-}
-
-std::string Option::name() const
-{
- return _name;
-}
-
-void Option::set_required(bool is_required)
-{
- _is_required = is_required;
-}
-
-void Option::set_help(std::string help)
-{
- _help = std::move(help);
-}
-
-bool Option::is_required() const
-{
- return _is_required;
-}
-
-bool Option::is_set() const
-{
- return _is_set;
-}
-} // namespace framework
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/framework/command_line/ToggleOption.cpp b/tests/framework/command_line/ToggleOption.cpp
deleted file mode 100644
index df5b1f813b..0000000000
--- a/tests/framework/command_line/ToggleOption.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "ToggleOption.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace framework
-{
-ToggleOption::ToggleOption(std::string name, bool default_value)
- : SimpleOption<bool>
-{
- std::move(name), default_value
-}
-{
-}
-
-bool ToggleOption::parse(std::string value)
-{
- if(value == "true")
- {
- _value = true;
- _is_set = true;
- }
- else if(value == "false")
- {
- _value = false;
- _is_set = true;
- }
-
- return _is_set;
-}
-
-std::string ToggleOption::help() const
-{
- return "--" + name() + ", --no-" + name() + " - " + _help;
-}
-} // namespace framework
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/main.cpp b/tests/main.cpp
index 4a8a82d490..ae9a47e1d4 100644
--- a/tests/main.cpp
+++ b/tests/main.cpp
@@ -28,11 +28,11 @@
#include "tests/framework/Framework.h"
#include "tests/framework/Macros.h"
#include "tests/framework/Profiler.h"
-#include "tests/framework/command_line/CommandLineOptions.h"
-#include "tests/framework/command_line/CommandLineParser.h"
#include "tests/framework/command_line/CommonOptions.h"
#include "tests/framework/instruments/Instruments.h"
#include "tests/framework/printers/Printers.h"
+#include "utils/command_line/CommandLineOptions.h"
+#include "utils/command_line/CommandLineParser.h"
#ifdef ARM_COMPUTE_CL
#include "arm_compute/runtime/CL/CLScheduler.h"
@@ -72,7 +72,7 @@ int main(int argc, char **argv)
framework::Framework &framework = framework::Framework::get();
- framework::CommandLineParser parser;
+ utils::CommandLineParser parser;
std::set<framework::DatasetMode> allowed_modes
{
@@ -83,23 +83,23 @@ int main(int argc, char **argv)
framework::CommonOptions options(parser);
- auto dataset_mode = parser.add_option<framework::EnumOption<framework::DatasetMode>>("mode", allowed_modes, framework::DatasetMode::PRECOMMIT);
+ auto dataset_mode = parser.add_option<utils::EnumOption<framework::DatasetMode>>("mode", allowed_modes, framework::DatasetMode::PRECOMMIT);
dataset_mode->set_help("For managed datasets select which group to use");
- auto filter = parser.add_option<framework::SimpleOption<std::string>>("filter", ".*");
+ auto filter = parser.add_option<utils::SimpleOption<std::string>>("filter", ".*");
filter->set_help("Regular expression to select test cases");
- auto filter_id = parser.add_option<framework::SimpleOption<std::string>>("filter-id");
+ auto filter_id = parser.add_option<utils::SimpleOption<std::string>>("filter-id");
filter_id->set_help("List of test ids. ... can be used to define a range.");
- auto stop_on_error = parser.add_option<framework::ToggleOption>("stop-on-error");
+ auto stop_on_error = parser.add_option<utils::ToggleOption>("stop-on-error");
stop_on_error->set_help("Abort execution after the first failed test (useful for debugging)");
- auto seed = parser.add_option<framework::SimpleOption<std::random_device::result_type>>("seed", std::random_device()());
+ auto seed = parser.add_option<utils::SimpleOption<std::random_device::result_type>>("seed", std::random_device()());
seed->set_help("Global seed for random number generation");
- auto list_tests = parser.add_option<framework::ToggleOption>("list-tests", false);
+ auto list_tests = parser.add_option<utils::ToggleOption>("list-tests", false);
list_tests->set_help("List all test names");
- auto test_instruments = parser.add_option<framework::ToggleOption>("test-instruments", false);
+ auto test_instruments = parser.add_option<utils::ToggleOption>("test-instruments", false);
test_instruments->set_help("Test if the instruments work on the platform");
- auto error_on_missing_assets = parser.add_option<framework::ToggleOption>("error-on-missing-assets", false);
+ auto error_on_missing_assets = parser.add_option<utils::ToggleOption>("error-on-missing-assets", false);
error_on_missing_assets->set_help("Mark a test as failed instead of skipping it when assets are missing");
- auto assets = parser.add_positional_option<framework::SimpleOption<std::string>>("assets");
+ auto assets = parser.add_positional_option<utils::SimpleOption<std::string>>("assets");
assets->set_help("Path to the assets directory");
try
diff --git a/tests/validate_examples/RunExample.cpp b/tests/validate_examples/RunExample.cpp
index 8b1c39b844..b4e5d37648 100644
--- a/tests/validate_examples/RunExample.cpp
+++ b/tests/validate_examples/RunExample.cpp
@@ -33,9 +33,9 @@
#include "tests/Globals.h"
#include "tests/framework/Framework.h"
#include "tests/framework/Macros.h"
-#include "tests/framework/command_line/CommandLineParser.h"
#include "tests/framework/command_line/CommonOptions.h"
#include "tests/framework/instruments/Instruments.h"
+#include "utils/command_line/CommandLineParser.h"
#ifdef ARM_COMPUTE_CL
#include "arm_compute/runtime/CL/CLScheduler.h"
@@ -87,13 +87,13 @@ public:
int run_example(int argc, char **argv, std::unique_ptr<ValidateExample> example)
{
- framework::CommandLineParser parser;
- framework::CommonOptions options(parser);
- auto example_args = parser.add_option<framework::ListOption<std::string>>("example_args");
+ utils::CommandLineParser parser;
+ framework::CommonOptions options(parser);
+ auto example_args = parser.add_option<utils::ListOption<std::string>>("example_args");
example_args->set_help("Arguments to pass to the example separated by commas (e.g: arg0,arg1,arg2)");
- auto seed = parser.add_option<framework::SimpleOption<std::random_device::result_type>>("seed", std::random_device()());
+ auto seed = parser.add_option<utils::SimpleOption<std::random_device::result_type>>("seed", std::random_device()());
seed->set_help("Global seed for random number generation");
- auto validate = parser.add_option<framework::SimpleOption<int>>("validate", 1);
+ auto validate = parser.add_option<utils::SimpleOption<int>>("validate", 1);
validate->set_help("Enable / disable output validation (0/1)");
framework::Framework &framework = framework::Framework::get();
diff --git a/utils/CommonGraphOptions.cpp b/utils/CommonGraphOptions.cpp
new file mode 100644
index 0000000000..d6ff0516aa
--- /dev/null
+++ b/utils/CommonGraphOptions.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "CommonGraphOptions.h"
+
+#include "arm_compute/graph/TypeLoader.h"
+#include "arm_compute/graph/TypePrinter.h"
+
+#include "support/ToolchainSupport.h"
+
+#include <map>
+
+using namespace arm_compute::graph;
+
+namespace
+{
+std::pair<unsigned int, unsigned int> parse_validation_range(const std::string &validation_range)
+{
+ std::pair<unsigned int /* start */, unsigned int /* end */> range = { 0, std::numeric_limits<unsigned int>::max() };
+ if(!validation_range.empty())
+ {
+ std::string str;
+ std::stringstream stream(validation_range);
+
+ // Get first value
+ std::getline(stream, str, ',');
+ if(stream.fail())
+ {
+ return range;
+ }
+ else
+ {
+ range.first = arm_compute::support::cpp11::stoi(str);
+ }
+
+ // Get second value
+ std::getline(stream, str);
+ if(stream.fail())
+ {
+ range.second = range.first;
+ return range;
+ }
+ else
+ {
+ range.second = arm_compute::support::cpp11::stoi(str);
+ }
+ }
+ return range;
+}
+} // namespace
+
+namespace arm_compute
+{
+namespace utils
+{
+::std::ostream &operator<<(::std::ostream &os, const CommonGraphParams &common_params)
+{
+ std::string false_str = std::string("false");
+ std::string true_str = std::string("true");
+
+ os << "Threads : " << common_params.threads << std::endl;
+ os << "Target : " << common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Data layout : " << common_params.data_layout << std::endl;
+ os << "Tuner enabled? : " << (common_params.enable_tuner ? true_str : false_str) << std::endl;
+ os << "Fast math enabled? : " << (common_params.fast_math_hint == FastMathHint::ENABLED ? true_str : false_str) << std::endl;
+ if(!common_params.data_path.empty())
+ {
+ os << "Data path : " << common_params.data_path << std::endl;
+ }
+ if(!common_params.image.empty())
+ {
+ os << "Image file : " << common_params.image << std::endl;
+ }
+ if(!common_params.labels.empty())
+ {
+ os << "Labels file : " << common_params.labels << std::endl;
+ }
+ if(!common_params.validation_file.empty())
+ {
+ os << "Validation range : " << common_params.validation_range_start << "-" << common_params.validation_range_end << std::endl;
+ os << "Validation file : " << common_params.validation_file << std::endl;
+ if(!common_params.validation_path.empty())
+ {
+ os << "Validation path : " << common_params.validation_path << std::endl;
+ }
+ }
+
+ return os;
+}
+
+CommonGraphOptions::CommonGraphOptions(CommandLineParser &parser)
+ : help(parser.add_option<ToggleOption>("help")),
+ threads(parser.add_option<SimpleOption<int>>("threads", 1)),
+ target(),
+ data_type(),
+ data_layout(),
+ enable_tuner(parser.add_option<ToggleOption>("enable-tuner")),
+ fast_math_hint(parser.add_option<ToggleOption>("fast-math")),
+ data_path(parser.add_option<SimpleOption<std::string>>("data")),
+ image(parser.add_option<SimpleOption<std::string>>("image")),
+ labels(parser.add_option<SimpleOption<std::string>>("labels")),
+ validation_file(parser.add_option<SimpleOption<std::string>>("validation-file")),
+ validation_path(parser.add_option<SimpleOption<std::string>>("validation-path")),
+ validation_range(parser.add_option<SimpleOption<std::string>>("validation-range"))
+{
+ std::set<arm_compute::graph::Target> supported_targets
+ {
+ Target::NEON,
+ Target::CL,
+ Target::GC,
+ };
+
+ std::set<arm_compute::DataType> supported_data_types
+ {
+ DataType::F16,
+ DataType::F32,
+ DataType::QASYMM8,
+ };
+
+ std::set<DataLayout> supported_data_layouts
+ {
+ DataLayout::NHWC,
+ DataLayout::NCHW,
+ };
+
+ target = parser.add_option<EnumOption<Target>>("target", supported_targets, Target::NEON);
+ data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
+ data_layout = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NCHW);
+
+ help->set_help("Show this help message");
+ threads->set_help("Number of threads to use");
+ target->set_help("Target to execute on");
+ data_type->set_help("Data type to use");
+ data_layout->set_help("Data layout to use");
+ enable_tuner->set_help("Enable tuner");
+ fast_math_hint->set_help("Enable fast math");
+ data_path->set_help("Path where graph parameters reside");
+ image->set_help("Input image for the graph");
+ labels->set_help("File containing the output labels");
+ validation_file->set_help("File used to validate the graph");
+ validation_path->set_help("Path to the validation data");
+ validation_range->set_help("Range of images to validate (Format: start,end)");
+}
+
+CommonGraphParams consume_common_graph_parameters(CommonGraphOptions &options)
+{
+ FastMathHint fast_math_hint_value = options.fast_math_hint->value() ? FastMathHint::ENABLED : FastMathHint::DISABLED;
+ auto validation_range = parse_validation_range(options.validation_range->value());
+
+ CommonGraphParams common_params;
+ common_params.help = options.help->is_set() ? options.help->value() : false;
+ common_params.threads = options.threads->value();
+ common_params.target = options.target->value();
+ common_params.data_type = options.data_type->value();
+ common_params.data_layout = options.data_layout->value();
+ common_params.enable_tuner = options.enable_tuner->is_set() ? options.enable_tuner->value() : false;
+ common_params.fast_math_hint = options.fast_math_hint->is_set() ? fast_math_hint_value : FastMathHint::DISABLED;
+ common_params.data_path = options.data_path->value();
+ common_params.image = options.image->value();
+ common_params.labels = options.labels->value();
+ common_params.validation_file = options.validation_file->value();
+ common_params.validation_path = options.validation_path->value();
+ common_params.validation_range_start = validation_range.first;
+ common_params.validation_range_end = validation_range.second;
+
+ return common_params;
+}
+} // namespace utils
+} // namespace arm_compute
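parse_validation_range() above accepts an empty string, a single index, or a comma-separated inclusive pair. The sketch below re-implements that logic outside the library purely to show the three accepted forms of --validation-range; parse_range is an illustrative copy, not the library symbol:

#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <utility>

// ""      -> [0, UINT_MAX]  (validate everything)
// "100"   -> [100, 100]     (a single image)
// "10,50" -> [10, 50]       (an inclusive range)
std::pair<unsigned int, unsigned int> parse_range(const std::string &s)
{
    std::pair<unsigned int, unsigned int> range{ 0, std::numeric_limits<unsigned int>::max() };
    if(s.empty())
    {
        return range;
    }
    std::stringstream stream(s);
    std::string       token;
    std::getline(stream, token, ',');
    range.first = static_cast<unsigned int>(std::stoul(token));
    if(!std::getline(stream, token))
    {
        range.second = range.first; // Single value: start == end
        return range;
    }
    range.second = static_cast<unsigned int>(std::stoul(token));
    return range;
}

int main()
{
    for(const auto &s : { std::string(""), std::string("100"), std::string("10,50") })
    {
        const auto r = parse_range(s);
        std::cout << "'" << s << "' -> [" << r.first << ", " << r.second << "]\n";
    }
}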
diff --git a/utils/CommonGraphOptions.h b/utils/CommonGraphOptions.h
new file mode 100644
index 0000000000..ef2e4fb946
--- /dev/null
+++ b/utils/CommonGraphOptions.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_EXAMPLES_UTILS_COMMON_GRAPH_OPTIONS
+#define ARM_COMPUTE_EXAMPLES_UTILS_COMMON_GRAPH_OPTIONS
+
+#include "utils/command_line/CommandLineOptions.h"
+#include "utils/command_line/CommandLineParser.h"
+
+#include "arm_compute/graph/TypeLoader.h"
+#include "arm_compute/graph/TypePrinter.h"
+
+namespace arm_compute
+{
+namespace utils
+{
+/** Structure holding all the common graph parameters */
+struct CommonGraphParams
+{
+ bool help{ false };
+ int threads{ 0 };
+ arm_compute::graph::Target target{ arm_compute::graph::Target::NEON };
+ arm_compute::DataType data_type{ DataType::F32 };
+ arm_compute::DataLayout data_layout{ DataLayout::NCHW };
+ bool enable_tuner{ false };
+ arm_compute::graph::FastMathHint fast_math_hint{ arm_compute::graph::FastMathHint::DISABLED };
+ std::string data_path{};
+ std::string image{};
+ std::string labels{};
+ std::string validation_file{};
+ std::string validation_path{};
+ unsigned int validation_range_start{ 0 };
+ unsigned int validation_range_end{ std::numeric_limits<unsigned int>::max() };
+};
+
+/** Formatted output of the CommonGraphParams type
+ *
+ * @param[out] os Output stream.
+ * @param[in] common_params Common parameters to output
+ *
+ * @return Modified output stream.
+ */
+::std::ostream &operator<<(::std::ostream &os, const CommonGraphParams &common_params);
+
+/** Common command line options used to configure the graph examples
+ *
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonGraphOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class CommonGraphOptions
+{
+public:
+ /** Constructor
+ *
+ * @param[in,out] parser A parser on which "parse()" hasn't been called yet.
+ */
+ CommonGraphOptions(CommandLineParser &parser);
+ /** Prevent instances of this class from being copy constructed */
+ CommonGraphOptions(const CommonGraphOptions &) = delete;
+ /** Prevent instances of this class from being copied */
+ CommonGraphOptions &operator=(const CommonGraphOptions &) = delete;
+
+ ToggleOption *help; /**< Show help option */
+ SimpleOption<int> *threads; /**< Number of threads option */
+ EnumOption<arm_compute::graph::Target> *target; /**< Graph execution target */
+ EnumOption<arm_compute::DataType> *data_type; /**< Graph data type */
+ EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
+ ToggleOption *enable_tuner; /**< Enable tuner */
+ ToggleOption *fast_math_hint; /**< Fast math hint */
+ SimpleOption<std::string> *data_path; /**< Trainable parameters path */
+ SimpleOption<std::string> *image; /**< Image */
+ SimpleOption<std::string> *labels; /**< Labels */
+ SimpleOption<std::string> *validation_file; /**< Validation file */
+ SimpleOption<std::string> *validation_path; /**< Validation data path */
+ SimpleOption<std::string> *validation_range; /**< Validation range */
+};
+
+/** Consumes the common graph options and creates a structure containing the parsed parameters
+ *
+ * @param[in] options Options to consume
+ *
+ * @return Structure containing the common graph parameters
+ */
+CommonGraphParams consume_common_graph_parameters(CommonGraphOptions &options);
+} // namespace utils
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_EXAMPLES_UTILS_COMMON_GRAPH_OPTIONS */
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index d94dcb0d86..5c1fda5ca6 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -26,11 +26,13 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/graph/Logger.h"
#include "arm_compute/runtime/SubTensor.h"
#include "utils/ImageLoader.h"
#include "utils/Utils.h"
#include <iomanip>
+#include <limits>
using namespace arm_compute::graph_utils;
@@ -169,17 +171,18 @@ bool NumPyAccessor::access_tensor(ITensor &tensor)
return false;
}
-PPMAccessor::PPMAccessor(std::string ppm_path, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
- : _ppm_path(std::move(ppm_path)), _bgr(bgr), _preprocessor(std::move(preprocessor))
+ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
+ : _filename(std::move(filename)), _bgr(bgr), _preprocessor(std::move(preprocessor))
{
}
-bool PPMAccessor::access_tensor(ITensor &tensor)
+bool ImageAccessor::access_tensor(ITensor &tensor)
{
- utils::PPMLoader ppm;
+ auto image_loader = utils::ImageLoaderFactory::create(_filename);
+ ARM_COMPUTE_ERROR_ON_MSG(image_loader == nullptr, "Unsupported image type");
- // Open PPM file
- ppm.open(_ppm_path);
+ // Open image file
+ image_loader->open(_filename);
// Get permutated shape and permutation parameters
TensorShape permuted_shape = tensor.info()->tensor_shape();
@@ -188,11 +191,12 @@ bool PPMAccessor::access_tensor(ITensor &tensor)
{
std::tie(permuted_shape, perm) = compute_permutation_paramaters(tensor.info()->tensor_shape(), tensor.info()->data_layout());
}
- ARM_COMPUTE_ERROR_ON_MSG(ppm.width() != permuted_shape.x() || ppm.height() != permuted_shape.y(),
- "Failed to load image file: dimensions [%d,%d] not correct, expected [%d,%d].", ppm.width(), ppm.height(), permuted_shape.x(), permuted_shape.y());
+ ARM_COMPUTE_ERROR_ON_MSG(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
+ "Failed to load image file: dimensions [%d,%d] not correct, expected [%d,%d].",
+ image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y());
// Fill the tensor with the PPM content (BGR)
- ppm.fill_planar_tensor(tensor, _bgr);
+ image_loader->fill_planar_tensor(tensor, _bgr);
// Preprocess tensor
if(_preprocessor)
@@ -203,12 +207,13 @@ bool PPMAccessor::access_tensor(ITensor &tensor)
return true;
}
-ValidationInputAccessor::ValidationInputAccessor(const std::string &image_list,
- std::string images_path,
- bool bgr,
- unsigned int start,
- unsigned int end)
- : _path(std::move(images_path)), _images(), _bgr(bgr), _offset(0)
+ValidationInputAccessor::ValidationInputAccessor(const std::string &image_list,
+ std::string images_path,
+ std::unique_ptr<IPreprocessor> preprocessor,
+ bool bgr,
+ unsigned int start,
+ unsigned int end)
+ : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0)
{
ARM_COMPUTE_ERROR_ON_MSG(start > end, "Invalid validation range!");
@@ -247,7 +252,9 @@ bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor)
utils::JPEGLoader jpeg;
// Open JPEG file
- jpeg.open(_path + _images[_offset++]);
+ std::string image_name = _path + _images[_offset++];
+ jpeg.open(image_name);
+ ARM_COMPUTE_LOG_GRAPH_INFO("Validating " << image_name << std::endl);
// Get permutated shape and permutation parameters
TensorShape permuted_shape = tensor.info()->tensor_shape();
@@ -261,22 +268,26 @@ bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor)
"Failed to load image file: dimensions [%d,%d] not correct, expected [%d,%d].",
jpeg.width(), jpeg.height(), permuted_shape.x(), permuted_shape.y());
- // Fill the tensor with the PPM content (BGR)
+ // Fill the tensor with the JPEG content (BGR)
jpeg.fill_planar_tensor(tensor, _bgr);
+
+ // Preprocess tensor
+ if(_preprocessor)
+ {
+ _preprocessor->preprocess(tensor);
+ }
}
return ret;
}
ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list,
- size_t top_n,
std::ostream &output_stream,
unsigned int start,
unsigned int end)
- : _results(), _output_stream(output_stream), _top_n(top_n), _offset(0), _positive_samples(0)
+ : _results(), _output_stream(output_stream), _offset(0), _positive_samples_top1(0), _positive_samples_top5(0)
{
ARM_COMPUTE_ERROR_ON_MSG(start > end, "Invalid validation range!");
- ARM_COMPUTE_ERROR_ON(top_n == 0);
std::ifstream ifs;
try
@@ -308,13 +319,15 @@ ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list
void ValidationOutputAccessor::reset()
{
- _offset = 0;
- _positive_samples = 0;
+ _offset = 0;
+ _positive_samples_top1 = 0;
+ _positive_samples_top5 = 0;
}
bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
- if(_offset < _results.size())
+ bool ret = _offset < _results.size();
+ if(ret)
{
// Get results
std::vector<size_t> tensor_results;
@@ -332,30 +345,16 @@ bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
// Check if tensor results are within top-n accuracy
size_t correct_label = _results[_offset++];
- auto is_valid_label = [&](size_t label)
- {
- return label == correct_label;
- };
- if(std::any_of(std::begin(tensor_results), std::begin(tensor_results) + _top_n - 1, is_valid_label))
- {
- ++_positive_samples;
- }
+ aggregate_sample(tensor_results, _positive_samples_top1, 1, correct_label);
+ aggregate_sample(tensor_results, _positive_samples_top5, 5, correct_label);
}
// Report top_n accuracy
- bool ret = _offset >= _results.size();
- if(ret)
+ if(_offset >= _results.size())
{
- size_t total_samples = _results.size();
- size_t negative_samples = total_samples - _positive_samples;
- float accuracy = _positive_samples / static_cast<float>(total_samples);
-
- _output_stream << "----------Top " << _top_n << " accuracy ----------" << std::endl
- << std::endl;
- _output_stream << "Positive samples : " << _positive_samples << std::endl;
- _output_stream << "Negative samples : " << negative_samples << std::endl;
- _output_stream << "Accuracy : " << accuracy << std::endl;
+ report_top_n(1, _results.size(), _positive_samples_top1);
+ report_top_n(5, _results.size(), _positive_samples_top5);
}
return ret;
@@ -383,6 +382,31 @@ std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_comp
return index;
}
+void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label)
+{
+ auto is_valid_label = [correct_label](size_t label)
+ {
+ return label == correct_label;
+ };
+
+ if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label))
+ {
+ ++positive_samples;
+ }
+}
+
+void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t positive_samples)
+{
+ size_t negative_samples = total_samples - positive_samples;
+ float accuracy = positive_samples / static_cast<float>(total_samples);
+
+ _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl
+ << std::endl;
+ _output_stream << "Positive samples : " << positive_samples << std::endl;
+ _output_stream << "Negative samples : " << negative_samples << std::endl;
+ _output_stream << "Accuracy : " << accuracy << std::endl;
+}
+
TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
: _labels(), _output_stream(output_stream), _top_n(top_n)
{
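
Note on the accuracy bookkeeping above: the old code only scanned the first top_n - 1 predictions, so the refactored aggregate_sample()/report_top_n() pair fixes an off-by-one while splitting the counter into separate top-1 and top-5 tallies. Below is a minimal standalone sketch of the same counting logic using only the standard library; the names are hypothetical and not part of the patch.

// Standalone sketch of the top-N accuracy counting (hypothetical names, std-only).
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

static void aggregate_sample_sketch(const std::vector<std::size_t> &sorted_predictions,
                                    std::size_t &positive_samples, std::size_t top_n, std::size_t correct_label)
{
    // Count a hit if the correct label appears within the first top_n predictions
    const auto first = std::begin(sorted_predictions);
    if(std::any_of(first, first + top_n, [correct_label](std::size_t label) { return label == correct_label; }))
    {
        ++positive_samples;
    }
}

int main()
{
    // Two fake samples: predicted labels sorted by descending score, plus ground truth
    const std::vector<std::vector<std::size_t>> predictions = { { 3, 7, 1, 9, 4 }, { 5, 2, 8, 0, 6 } };
    const std::vector<std::size_t>              labels      = { 3, 6 };

    std::size_t top1 = 0, top5 = 0;
    for(std::size_t i = 0; i < predictions.size(); ++i)
    {
        aggregate_sample_sketch(predictions[i], top1, 1, labels[i]);
        aggregate_sample_sketch(predictions[i], top5, 5, labels[i]);
    }

    std::cout << "Top-1 accuracy: " << top1 / static_cast<float>(predictions.size()) << std::endl;
    std::cout << "Top-5 accuracy: " << top5 / static_cast<float>(predictions.size()) << std::endl;
    return 0;
}
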
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index 768c608d26..8558b9066c 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -31,6 +31,8 @@
#include "arm_compute/graph/Types.h"
#include "arm_compute/runtime/Tensor.h"
+#include "utils/CommonGraphOptions.h"
+
#include <array>
#include <random>
#include <string>
@@ -150,25 +152,25 @@ private:
std::ostream &_output_stream;
};
-/** PPM accessor class */
-class PPMAccessor final : public graph::ITensorAccessor
+/** Image accessor class */
+class ImageAccessor final : public graph::ITensorAccessor
{
public:
/** Constructor
*
- * @param[in] ppm_path Path to PPM file
+ * @param[in] filename Image file
* @param[in] bgr (Optional) Fill the first plane with blue channel (default = false - RGB format)
- * @param[in] preprocessor (Optional) PPM pre-processing object
+ * @param[in] preprocessor (Optional) Image pre-processing object
*/
- PPMAccessor(std::string ppm_path, bool bgr = true, std::unique_ptr<IPreprocessor> preprocessor = nullptr);
+ ImageAccessor(std::string filename, bool bgr = true, std::unique_ptr<IPreprocessor> preprocessor = nullptr);
/** Allow instances of this class to be move constructed */
- PPMAccessor(PPMAccessor &&) = default;
+ ImageAccessor(ImageAccessor &&) = default;
// Inherited methods overriden:
bool access_tensor(ITensor &tensor) override;
private:
- const std::string _ppm_path;
+ const std::string _filename;
const bool _bgr;
std::unique_ptr<IPreprocessor> _preprocessor;
};
@@ -179,28 +181,31 @@ class ValidationInputAccessor final : public graph::ITensorAccessor
public:
/** Constructor
*
- * @param[in] image_list File containing all the images to validate
- * @param[in] images_path Path to images.
- * @param[in] bgr (Optional) Fill the first plane with blue channel (default = false - RGB format)
- * @param[in] start (Optional) Start range
- * @param[in] end (Optional) End range
+ * @param[in] image_list File containing all the images to validate
+ * @param[in] images_path Path to images.
+ * @param[in] bgr (Optional) Fill the first plane with blue channel (default = false - RGB format)
+ * @param[in] preprocessor (Optional) Image pre-processing object (default = nullptr)
+ * @param[in] start (Optional) Start range
+ * @param[in] end (Optional) End range
*
* @note Range is defined as [start, end]
*/
- ValidationInputAccessor(const std::string &image_list,
- std::string images_path,
- bool bgr = true,
- unsigned int start = 0,
- unsigned int end = 0);
+ ValidationInputAccessor(const std::string &image_list,
+ std::string images_path,
+ std::unique_ptr<IPreprocessor> preprocessor = nullptr,
+ bool bgr = true,
+ unsigned int start = 0,
+ unsigned int end = 0);
// Inherited methods overriden:
bool access_tensor(ITensor &tensor) override;
private:
- std::string _path;
- std::vector<std::string> _images;
- bool _bgr;
- size_t _offset;
+ std::string _path;
+ std::vector<std::string> _images;
+ std::unique_ptr<IPreprocessor> _preprocessor;
+ bool _bgr;
+ size_t _offset;
};
/** Output Accessor used for network validation */
@@ -210,7 +215,6 @@ public:
/** Default Constructor
*
* @param[in] image_list File containing all the images and labels results
- * @param[in] top_n (Optional) Top N accuracy (Defaults to 5)
* @param[out] output_stream (Optional) Output stream (Defaults to the standard output stream)
* @param[in] start (Optional) Start range
* @param[in] end (Optional) End range
@@ -218,7 +222,6 @@ public:
* @note Range is defined as [start, end]
*/
ValidationOutputAccessor(const std::string &image_list,
- size_t top_n = 5,
std::ostream &output_stream = std::cout,
unsigned int start = 0,
unsigned int end = 0);
@@ -237,13 +240,28 @@ private:
*/
template <typename T>
std::vector<size_t> access_predictions_tensor(ITensor &tensor);
+ /** Aggregates the results of a sample
+ *
+ * @param[in] res Vector containing the results of a graph
+ * @param[in,out] positive_samples Positive samples to be updated
+ * @param[in] top_n Top n accuracy to measure
+ * @param[in] correct_label Correct label of the current sample
+ */
+ void aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label);
+ /** Reports top N accuracy
+ *
+ * @param[in] top_n Top N accuracy that is being reported
+ * @param[in] total_samples Total number of samples
+ * @param[in] positive_samples Positive samples
+ */
+ void report_top_n(size_t top_n, size_t total_samples, size_t positive_samples);
private:
std::vector<int> _results;
std::ostream &_output_stream;
- size_t _top_n;
size_t _offset;
- size_t _positive_samples;
+ size_t _positive_samples_top1;
+ size_t _positive_samples_top5;
};
/** Result accessor class */
@@ -359,56 +377,78 @@ inline std::unique_ptr<graph::ITensorAccessor> get_weights_accessor(const std::s
}
}
-/** Generates appropriate input accessor according to the specified ppm_path
+/** Generates appropriate input accessor according to the specified graph parameters
*
- * @note If ppm_path is empty will generate a DummyAccessor else will generate a PPMAccessor
- *
- * @param[in] ppm_path Path to PPM file
- * @param[in] preprocessor Preproccessor object
- * @param[in] bgr (Optional) Fill the first plane with blue channel (default = true)
+ * @param[in] graph_parameters Graph parameters
+ * @param[in] preprocessor (Optional) Preprocessor object
+ * @param[in] bgr (Optional) Fill the first plane with blue channel (default = true)
*
* @return An appropriate tensor accessor
*/
-inline std::unique_ptr<graph::ITensorAccessor> get_input_accessor(const std::string &ppm_path,
- std::unique_ptr<IPreprocessor> preprocessor = nullptr,
- bool bgr = true)
+inline std::unique_ptr<graph::ITensorAccessor> get_input_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters,
+ std::unique_ptr<IPreprocessor> preprocessor = nullptr,
+ bool bgr = true)
{
- if(ppm_path.empty())
+ if(!graph_parameters.validation_file.empty())
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>();
+ return arm_compute::support::cpp14::make_unique<ValidationInputAccessor>(graph_parameters.validation_file,
+ graph_parameters.validation_path,
+ std::move(preprocessor),
+ bgr,
+ graph_parameters.validation_range_start,
+ graph_parameters.validation_range_end);
}
else
{
- if(arm_compute::utility::endswith(ppm_path, ".npy"))
+ const std::string &image_file = graph_parameters.image;
+ if(arm_compute::utility::endswith(image_file, ".npy"))
+ {
+ return arm_compute::support::cpp14::make_unique<NumPyBinLoader>(image_file);
+ }
+ else if(arm_compute::utility::endswith(image_file, ".jpeg")
+ || arm_compute::utility::endswith(image_file, ".jpg")
+ || arm_compute::utility::endswith(image_file, ".ppm"))
{
- return arm_compute::support::cpp14::make_unique<NumPyBinLoader>(ppm_path);
+ return arm_compute::support::cpp14::make_unique<ImageAccessor>(image_file, bgr, std::move(preprocessor));
}
else
{
- return arm_compute::support::cpp14::make_unique<PPMAccessor>(ppm_path, bgr, std::move(preprocessor));
+ return arm_compute::support::cpp14::make_unique<DummyAccessor>();
}
}
}
-/** Generates appropriate output accessor according to the specified labels_path
+/** Generates appropriate output accessor according to the specified graph parameters
*
- * @note If labels_path is empty will generate a DummyAccessor else will generate a TopNPredictionsAccessor
+ * @note If a validation file is set in the graph parameters, a ValidationOutputAccessor is generated;
+ * otherwise a DummyAccessor is generated if no labels file is provided, else a TopNPredictionsAccessor
*
- * @param[in] labels_path Path to labels text file
- * @param[in] top_n (Optional) Number of output classes to print
- * @param[out] output_stream (Optional) Output stream
+ * @param[in] graph_parameters Graph parameters
+ * @param[in] top_n (Optional) Number of output classes to print (default = 5)
+ * @param[in] is_validation (Optional) Validation flag (default = false)
+ * @param[out] output_stream (Optional) Output stream (default = std::cout)
*
* @return An appropriate tensor accessor
*/
-inline std::unique_ptr<graph::ITensorAccessor> get_output_accessor(const std::string &labels_path, size_t top_n = 5, std::ostream &output_stream = std::cout)
+inline std::unique_ptr<graph::ITensorAccessor> get_output_accessor(const arm_compute::utils::CommonGraphParams &graph_parameters,
+ size_t top_n = 5,
+ bool is_validation = false,
+ std::ostream &output_stream = std::cout)
{
- if(labels_path.empty())
+ if(!graph_parameters.validation_file.empty())
+ {
+ return arm_compute::support::cpp14::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file,
+ output_stream,
+ graph_parameters.validation_range_start,
+ graph_parameters.validation_range_end);
+ }
+ else if(graph_parameters.labels.empty())
{
return arm_compute::support::cpp14::make_unique<DummyAccessor>(0);
}
else
{
- return arm_compute::support::cpp14::make_unique<TopNPredictionsAccessor>(labels_path, top_n, output_stream);
+ return arm_compute::support::cpp14::make_unique<TopNPredictionsAccessor>(graph_parameters.labels, top_n, output_stream);
}
}
/** Generates appropriate npy output accessor according to the specified npy_path
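
With the helpers above keyed on CommonGraphParams, a graph example no longer picks an accessor by hand. A hedged sketch of the selection they perform follows; the wrapper functions and namespace qualification are illustrative assumptions based on utils/GraphUtils.h, not part of the patch.

// Illustrative wiring of the new parameter-driven accessor helpers.
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"

#include <memory>
#include <utility>

// Assumed namespaces: CommonGraphParams lives in arm_compute::utils,
// the accessor helpers and IPreprocessor in arm_compute::graph_utils.
std::unique_ptr<arm_compute::graph::ITensorAccessor>
make_input_accessor_sketch(const arm_compute::utils::CommonGraphParams &params,
                           std::unique_ptr<arm_compute::graph_utils::IPreprocessor> preprocessor)
{
    // params.validation_file non-empty       -> ValidationInputAccessor over the listed images
    // params.image ends in .npy              -> NumPyBinLoader
    // params.image ends in .jpeg/.jpg/.ppm   -> ImageAccessor
    // anything else                          -> DummyAccessor
    return arm_compute::graph_utils::get_input_accessor(params, std::move(preprocessor), /* bgr */ true);
}

std::unique_ptr<arm_compute::graph::ITensorAccessor>
make_output_accessor_sketch(const arm_compute::utils::CommonGraphParams &params)
{
    // params.validation_file non-empty -> ValidationOutputAccessor (top-1/top-5 report)
    // no labels                        -> DummyAccessor
    // labels provided                  -> TopNPredictionsAccessor printing the 5 best classes
    return arm_compute::graph_utils::get_output_accessor(params, 5 /* top_n */);
}
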
diff --git a/utils/ImageLoader.h b/utils/ImageLoader.h
index edc89286a2..cc9619d3f1 100644
--- a/utils/ImageLoader.h
+++ b/utils/ImageLoader.h
@@ -486,6 +486,32 @@ private:
bool _is_loaded;
std::unique_ptr<uint8_t, malloc_deleter> _data;
};
+
+/** Factory for generating the appropriate image loader */
+class ImageLoaderFactory final
+{
+public:
+ /** Create an image loader depending on the image type
+ *
+ * @param[in] filename File that needs to be loaded
+ *
+ * @return Image loader
+ */
+ static std::unique_ptr<IImageLoader> create(const std::string &filename)
+ {
+ ImageType type = arm_compute::utils::get_image_type_from_file(filename);
+ switch(type)
+ {
+ case ImageType::PPM:
+ return support::cpp14::make_unique<PPMLoader>();
+ case ImageType::JPEG:
+ return support::cpp14::make_unique<JPEGLoader>();
+ case ImageType::UNKNOWN:
+ default:
+ return nullptr;
+ }
+ }
+};
} // namespace utils
} // namespace arm_compute
#endif /* __UTILS_IMAGE_LOADER_H__*/
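
A minimal sketch of how the new ImageLoaderFactory could back a file-agnostic accessor; the open()/fill_planar_tensor() calls mirror the JPEGLoader usage earlier in this patch, while the helper function and its error handling are purely illustrative.

// Illustrative use of ImageLoaderFactory (helper name and error handling are assumptions).
#include "arm_compute/core/ITensor.h"
#include "utils/ImageLoader.h"

#include <memory>
#include <stdexcept>
#include <string>

void fill_from_image_sketch(const std::string &filename, arm_compute::ITensor &tensor, bool bgr)
{
    // Dispatches on the file's magic number: PPM -> PPMLoader, JPEG -> JPEGLoader
    std::unique_ptr<arm_compute::utils::IImageLoader> loader = arm_compute::utils::ImageLoaderFactory::create(filename);
    if(loader == nullptr)
    {
        throw std::runtime_error("Unsupported image type: " + filename);
    }

    loader->open(filename);
    loader->fill_planar_tensor(tensor, bgr); // RGB planes, or BGR when bgr == true
}
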
diff --git a/utils/Utils.cpp b/utils/Utils.cpp
index a5c6a95a2a..133248e30c 100644
--- a/utils/Utils.cpp
+++ b/utils/Utils.cpp
@@ -74,7 +74,11 @@ int run_example(int argc, char **argv, std::unique_ptr<Example> example)
try
{
- example->do_setup(argc, argv);
+ bool status = example->do_setup(argc, argv);
+ if(!status)
+ {
+ return 1;
+ }
example->do_run();
example->do_teardown();
@@ -141,6 +145,41 @@ void draw_detection_rectangle(ITensor *tensor, const DetectionWindow &rect, uint
}
}
+ImageType get_image_type_from_file(const std::string &filename)
+{
+ ImageType type = ImageType::UNKNOWN;
+
+ try
+ {
+ // Open file
+ std::ifstream fs;
+ fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+ fs.open(filename, std::ios::in | std::ios::binary);
+
+ // Identify type from magic number
+ std::array<unsigned char, 2> magic_number{ { 0 } };
+ fs >> magic_number[0] >> magic_number[1];
+
+ // PPM check
+ if(static_cast<char>(magic_number[0]) == 'P' && static_cast<char>(magic_number[1]) == '6')
+ {
+ type = ImageType::PPM;
+ }
+ else if(magic_number[0] == 0xFF && magic_number[1] == 0xD8)
+ {
+ type = ImageType::JPEG;
+ }
+
+ fs.close();
+ }
+ catch(std::runtime_error &e)
+ {
+ ARM_COMPUTE_ERROR("Accessing %s: %s", filename.c_str(), e.what());
+ }
+
+ return type;
+}
+
std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs)
{
// Check the PPM magic number is valid
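
The type sniffing above reads the two-byte magic number ('P6' for PPM, 0xFF 0xD8 for JPEG). A small hypothetical command line check built on top of the new helper:

// Hypothetical helper: report the detected image type of a file.
#include "utils/Utils.h"

#include <iostream>

int main(int argc, char **argv)
{
    using arm_compute::utils::ImageType;

    if(argc < 2)
    {
        std::cout << "usage: " << argv[0] << " FILE" << std::endl;
        return 1;
    }

    switch(arm_compute::utils::get_image_type_from_file(argv[1]))
    {
        case ImageType::PPM:
            std::cout << "PPM (magic number 'P6')" << std::endl;
            break;
        case ImageType::JPEG:
            std::cout << "JPEG (magic number 0xFF 0xD8)" << std::endl;
            break;
        default:
            std::cout << "Unknown image type" << std::endl;
            break;
    }
    return 0;
}
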
diff --git a/utils/Utils.h b/utils/Utils.h
index c18ad217a4..ced6b147f1 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -55,6 +55,14 @@ namespace arm_compute
{
namespace utils
{
+/** Supported image types */
+enum class ImageType
+{
+ UNKNOWN,
+ PPM,
+ JPEG
+};
+
/** Abstract Example class.
*
* All examples have to inherit from this class.
@@ -66,8 +74,13 @@ public:
*
* @param[in] argc Argument count.
* @param[in] argv Argument values.
+ *
+ * @return True if the setup completed without errors, false otherwise
*/
- virtual void do_setup(int argc, char **argv) {};
+ virtual bool do_setup(int argc, char **argv)
+ {
+ return true;
+ };
/** Run the example. */
virtual void do_run() {};
/** Teardown the example. */
@@ -101,6 +114,14 @@ int run_example(int argc, char **argv)
*/
void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::DetectionWindow &rect, uint8_t r, uint8_t g, uint8_t b);
+/** Gets the image type of a file
+ *
+ * @param[in] filename File whose image type is to be identified
+ *
+ * @return Image type
+ */
+ImageType get_image_type_from_file(const std::string &filename);
+
/** Parse the ppm header from an input file stream. At the end of the execution,
* the file position pointer will be located at the first pixel stored in the ppm file
*
@@ -167,7 +188,7 @@ inline std::string get_typestring(DataType data_type)
case DataType::SIZET:
return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ ARM_COMPUTE_ERROR("Data type not supported");
}
}
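
Since do_setup() now returns a status, run_example() can stop before do_run() when option parsing fails (see the status check added to Utils.cpp above). A hedged sketch of an example written against the new contract; everything except the Example/run_example API shown in Utils.h is hypothetical.

// Hypothetical example built on the new bool do_setup() contract.
#include "utils/Utils.h"

#include <iostream>
#include <string>

class MinimalExample final : public arm_compute::utils::Example
{
public:
    bool do_setup(int argc, char **argv) override
    {
        if(argc < 2)
        {
            // Returning false makes run_example() exit with status 1 instead of calling do_run()
            std::cerr << "Usage: " << argv[0] << " IMAGE" << std::endl;
            return false;
        }
        _image = argv[1];
        return true;
    }

    void do_run() override
    {
        std::cout << "Running on " << _image << std::endl;
    }

private:
    std::string _image{};
};

int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<MinimalExample>(argc, argv);
}
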
diff --git a/tests/framework/command_line/CommandLineOptions.h b/utils/command_line/CommandLineOptions.h
index cb4b794a3e..8f82815020 100644
--- a/tests/framework/command_line/CommandLineOptions.h
+++ b/utils/command_line/CommandLineOptions.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_COMMANDLINEOPTIONS
-#define ARM_COMPUTE_TEST_COMMANDLINEOPTIONS
+#ifndef ARM_COMPUTE_UTILS_COMMANDLINEOPTIONS
+#define ARM_COMPUTE_UTILS_COMMANDLINEOPTIONS
#include "EnumListOption.h"
#include "EnumOption.h"
@@ -30,4 +30,4 @@
#include "Option.h"
#include "ToggleOption.h"
-#endif /* ARM_COMPUTE_TEST_COMMANDLINEOPTIONS */
+#endif /* ARM_COMPUTE_UTILS_COMMANDLINEOPTIONS */
diff --git a/tests/framework/command_line/CommandLineParser.cpp b/utils/command_line/CommandLineParser.h
index 09b466ce84..06c4bf5e2f 100644
--- a/tests/framework/command_line/CommandLineParser.cpp
+++ b/utils/command_line/CommandLineParser.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,18 +21,99 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "CommandLineParser.h"
+#ifndef ARM_COMPUTE_UTILS_COMMANDLINEPARSER
+#define ARM_COMPUTE_UTILS_COMMANDLINEPARSER
+
+#include "Option.h"
+#include "arm_compute/core/utils/misc/Utility.h"
+#include "support/ToolchainSupport.h"
#include <iostream>
+#include <map>
+#include <memory>
#include <regex>
+#include <string>
+#include <utility>
+#include <vector>
namespace arm_compute
{
-namespace test
+namespace utils
+{
+/** Class to parse command line arguments. */
+class CommandLineParser final
{
-namespace framework
+public:
+ /** Default constructor. */
+ CommandLineParser() = default;
+
+ /** Function to add a new option to the parser.
+ *
+ * @param[in] name Name of the option. Will be available under --name=VALUE.
+ * @param[in] args Option specific configuration arguments.
+ *
+ * @return Pointer to the option. The option is owned by the parser.
+ */
+ template <typename T, typename... As>
+ T *add_option(const std::string &name, As &&... args);
+
+ /** Function to add a new positional argument to the parser.
+ *
+ * @param[in] args Option specific configuration arguments.
+ *
+ * @return Pointer to the option. The option is owned by the parser.
+ */
+ template <typename T, typename... As>
+ T *add_positional_option(As &&... args);
+
+ /** Parses the command line arguments and updates the options accordingly.
+ *
+ * @param[in] argc Number of arguments.
+ * @param[in] argv Arguments.
+ */
+ void parse(int argc, char **argv);
+
+ /** Validates the previously parsed command line arguments.
+ *
+ * Validation fails if not all required options are provided. Additionally,
+ * warnings are generated for unknown options and for options that were
+ * given illegal values.
+ *
+ * @return True if all required options have been provided.
+ */
+ bool validate() const;
+
+ /** Prints a help message for all configured options.
+ *
+ * @param[in] program_name Name of the program to be used in the help message.
+ */
+ void print_help(const std::string &program_name) const;
+
+private:
+ using OptionsMap = std::map<std::string, std::unique_ptr<Option>>;
+ using PositionalOptionsVector = std::vector<std::unique_ptr<Option>>;
+
+ OptionsMap _options{};
+ PositionalOptionsVector _positional_options{};
+ std::vector<std::string> _unknown_options{};
+ std::vector<std::string> _invalid_options{};
+};
+
+template <typename T, typename... As>
+inline T *CommandLineParser::add_option(const std::string &name, As &&... args)
{
-void CommandLineParser::parse(int argc, char **argv)
+ auto result = _options.emplace(name, support::cpp14::make_unique<T>(name, std::forward<As>(args)...));
+ return static_cast<T *>(result.first->second.get());
+}
+
+template <typename T, typename... As>
+inline T *CommandLineParser::add_positional_option(As &&... args)
+{
+ _positional_options.emplace_back(support::cpp14::make_unique<T>(std::forward<As>(args)...));
+ return static_cast<T *>(_positional_options.back().get());
+}
+
+inline void CommandLineParser::parse(int argc, char **argv)
{
const std::regex option_regex{ "--((?:no-)?)([^=]+)(?:=(.*))?" };
@@ -60,7 +141,7 @@ void CommandLineParser::parse(int argc, char **argv)
int equal_sign = mixed_case_opt.find('=');
int pos = (equal_sign == -1) ? strlen(argv[i]) : equal_sign;
- const std::string option = tolower(mixed_case_opt.substr(0, pos)) + mixed_case_opt.substr(pos);
+ const std::string option = arm_compute::utility::tolower(mixed_case_opt.substr(0, pos)) + mixed_case_opt.substr(pos);
std::smatch option_matches;
if(std::regex_match(option, option_matches, option_regex))
@@ -98,7 +179,7 @@ void CommandLineParser::parse(int argc, char **argv)
}
}
-bool CommandLineParser::validate() const
+inline bool CommandLineParser::validate() const
{
bool is_valid = true;
@@ -133,7 +214,7 @@ bool CommandLineParser::validate() const
return is_valid;
}
-void CommandLineParser::print_help(const std::string &program_name) const
+inline void CommandLineParser::print_help(const std::string &program_name) const
{
std::cout << "usage: " << program_name << " \n";
@@ -148,6 +229,6 @@ void CommandLineParser::print_help(const std::string &program_name) const
std::cout << option->name() << "\n";
}
}
-} // namespace framework
-} // namespace test
+} // namespace utils
} // namespace arm_compute
+#endif /* ARM_COMPUTE_UTILS_COMMANDLINEPARSER */
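
Now that the parser is header-only under utils/command_line/, standalone tools can use it without pulling in the test framework. A hedged sketch of typical usage follows; the option names are illustrative, and the --name / --name=value forms are those accepted by the regex shown above.

// Hypothetical standalone use of the relocated CommandLineParser.
#include "utils/command_line/CommandLineOptions.h"
#include "utils/command_line/CommandLineParser.h"
#include "utils/command_line/SimpleOption.h"

#include <iostream>
#include <string>

int main(int argc, char **argv)
{
    using namespace arm_compute::utils;

    CommandLineParser parser;

    auto *help    = parser.add_option<ToggleOption>("help", false);
    auto *threads = parser.add_option<SimpleOption<int>>("threads", 1);
    auto *image   = parser.add_positional_option<SimpleOption<std::string>>("image");

    help->set_help("Print this help message");
    threads->set_help("Number of threads to use");
    image->set_help("Input image");

    parser.parse(argc, argv);

    if(help->is_set() && help->value())
    {
        parser.print_help(argv[0]);
        return 0;
    }
    if(!parser.validate())
    {
        return 1; // A required option was not provided
    }

    std::cout << "threads=" << threads->value() << " image=" << image->value() << std::endl;
    return 0;
}
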
diff --git a/tests/framework/command_line/EnumListOption.h b/utils/command_line/EnumListOption.h
index 39006d86b9..834becbaef 100644
--- a/tests/framework/command_line/EnumListOption.h
+++ b/utils/command_line/EnumListOption.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ENUMLISTOPTION
-#define ARM_COMPUTE_TEST_ENUMLISTOPTION
+#ifndef ARM_COMPUTE_UTILS_ENUMLISTOPTION
+#define ARM_COMPUTE_UTILS_ENUMLISTOPTION
#include "Option.h"
@@ -35,9 +35,7 @@
namespace arm_compute
{
-namespace test
-{
-namespace framework
+namespace utils
{
/** Implementation of an option that accepts any number of values from a fixed set. */
template <typename T>
@@ -147,7 +145,6 @@ inline const std::vector<T> &EnumListOption<T>::value() const
{
return _values;
}
-} // namespace framework
-} // namespace test
+} // namespace utils
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ENUMLISTOPTION */
+#endif /* ARM_COMPUTE_UTILS_ENUMLISTOPTION */
diff --git a/tests/framework/command_line/EnumOption.h b/utils/command_line/EnumOption.h
index 14d61859ae..b775db23fb 100644
--- a/tests/framework/command_line/EnumOption.h
+++ b/utils/command_line/EnumOption.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ENUMOPTION
-#define ARM_COMPUTE_TEST_ENUMOPTION
+#ifndef ARM_COMPUTE_UTILS_ENUMOPTION
+#define ARM_COMPUTE_UTILS_ENUMOPTION
#include "SimpleOption.h"
@@ -33,9 +33,7 @@
namespace arm_compute
{
-namespace test
-{
-namespace framework
+namespace utils
{
/** Implementation of a simple option that accepts a value from a fixed set. */
template <typename T>
@@ -133,7 +131,6 @@ inline const T &EnumOption<T>::value() const
{
return this->_value;
}
-} // namespace framework
-} // namespace test
+} // namespace utils
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ENUMOPTION */
+#endif /* ARM_COMPUTE_UTILS_ENUMOPTION */
diff --git a/tests/framework/command_line/ListOption.h b/utils/command_line/ListOption.h
index 07184e8e3b..209a85d968 100644
--- a/tests/framework/command_line/ListOption.h
+++ b/utils/command_line/ListOption.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_LISTOPTION
-#define ARM_COMPUTE_TEST_LISTOPTION
+#ifndef ARM_COMPUTE_UTILS_LISTOPTION
+#define ARM_COMPUTE_UTILS_LISTOPTION
#include "Option.h"
@@ -34,9 +34,7 @@
namespace arm_compute
{
-namespace test
-{
-namespace framework
+namespace utils
{
/** Implementation of an option that accepts any number of values. */
template <typename T>
@@ -115,7 +113,6 @@ inline const std::vector<T> &ListOption<T>::value() const
{
return _values;
}
-} // namespace framework
-} // namespace test
+} // namespace utils
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LISTOPTION */
+#endif /* ARM_COMPUTE_UTILS_LISTOPTION */
diff --git a/tests/framework/command_line/Option.h b/utils/command_line/Option.h
index 25cf492b86..b9469a5cc3 100644
--- a/tests/framework/command_line/Option.h
+++ b/utils/command_line/Option.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,16 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_OPTIONBASE
-#define ARM_COMPUTE_TEST_OPTIONBASE
+#ifndef ARM_COMPUTE_UTILS_OPTIONBASE
+#define ARM_COMPUTE_UTILS_OPTIONBASE
#include <string>
namespace arm_compute
{
-namespace test
-{
-namespace framework
+namespace utils
{
/** Abstract base class for a command line option. */
class Option
@@ -103,7 +101,41 @@ protected:
bool _is_set{ false };
std::string _help{};
};
-} // namespace framework
-} // namespace test
+
+inline Option::Option(std::string name)
+ : _name{ std::move(name) }
+{
+}
+
+inline Option::Option(std::string name, bool is_required, bool is_set)
+ : _name{ std::move(name) }, _is_required{ is_required }, _is_set{ is_set }
+{
+}
+
+inline std::string Option::name() const
+{
+ return _name;
+}
+
+inline void Option::set_required(bool is_required)
+{
+ _is_required = is_required;
+}
+
+inline void Option::set_help(std::string help)
+{
+ _help = std::move(help);
+}
+
+inline bool Option::is_required() const
+{
+ return _is_required;
+}
+
+inline bool Option::is_set() const
+{
+ return _is_set;
+}
+} // namespace utils
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_OPTIONBASE */
+#endif /* ARM_COMPUTE_UTILS_OPTIONBASE */
diff --git a/tests/framework/command_line/SimpleOption.h b/utils/command_line/SimpleOption.h
index d02778e781..543759259a 100644
--- a/tests/framework/command_line/SimpleOption.h
+++ b/utils/command_line/SimpleOption.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SIMPLEOPTION
-#define ARM_COMPUTE_TEST_SIMPLEOPTION
+#ifndef ARM_COMPUTE_UTILS_SIMPLEOPTION
+#define ARM_COMPUTE_UTILS_SIMPLEOPTION
#include "Option.h"
@@ -32,9 +32,7 @@
namespace arm_compute
{
-namespace test
-{
-namespace framework
+namespace utils
{
/** Implementation of an option that accepts a single value. */
template <typename T>
@@ -115,7 +113,6 @@ inline const T &SimpleOption<T>::value() const
{
return _value;
}
-} // namespace framework
-} // namespace test
+} // namespace utils
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SIMPLEOPTION */
+#endif /* ARM_COMPUTE_UTILS_SIMPLEOPTION */
diff --git a/tests/framework/command_line/ToggleOption.h b/utils/command_line/ToggleOption.h
index c440c0ee87..b1d2a32c64 100644
--- a/tests/framework/command_line/ToggleOption.h
+++ b/utils/command_line/ToggleOption.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_TOGGLEOPTION
-#define ARM_COMPUTE_TEST_TOGGLEOPTION
+#ifndef ARM_COMPUTE_UTILS_TOGGLEOPTION
+#define ARM_COMPUTE_UTILS_TOGGLEOPTION
#include "SimpleOption.h"
@@ -30,9 +30,7 @@
namespace arm_compute
{
-namespace test
-{
-namespace framework
+namespace utils
{
/** Implementation of an option that can be either true or false. */
class ToggleOption : public SimpleOption<bool>
@@ -50,7 +48,35 @@ public:
bool parse(std::string value) override;
std::string help() const override;
};
-} // namespace framework
-} // namespace test
+
+inline ToggleOption::ToggleOption(std::string name, bool default_value)
+ : SimpleOption<bool>
+{
+ std::move(name), default_value
+}
+{
+}
+
+inline bool ToggleOption::parse(std::string value)
+{
+ if(value == "true")
+ {
+ _value = true;
+ _is_set = true;
+ }
+ else if(value == "false")
+ {
+ _value = false;
+ _is_set = true;
+ }
+
+ return _is_set;
+}
+
+inline std::string ToggleOption::help() const
+{
+ return "--" + name() + ", --no-" + name() + " - " + _help;
+}
+} // namespace utils
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_TOGGLEOPTION */
+#endif /* ARM_COMPUTE_UTILS_TOGGLEOPTION */
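
The help string above ("--name, --no-name") hints at how the toggle is driven: presumably the parser maps a bare --flag to parse("true") and --no-flag to parse("false"). A short illustrative sketch under that assumption; the flag name is made up.

// Illustrative toggle: ./tool --fast-math or ./tool --no-fast-math.
#include "utils/command_line/CommandLineParser.h"
#include "utils/command_line/ToggleOption.h"

#include <iostream>

int main(int argc, char **argv)
{
    using namespace arm_compute::utils;

    CommandLineParser parser;
    auto *fast_math = parser.add_option<ToggleOption>("fast-math", false);
    fast_math->set_help("Trade accuracy for speed");

    parser.parse(argc, argv);

    std::cout << std::boolalpha << "fast-math=" << fast_math->value() << std::endl;
    return 0;
}
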