author     Georgios Pinitas <georgios.pinitas@arm.com>    2017-12-22 15:27:52 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:49:16 +0000
commit     d8734b55d89f05901ba9a75349761a9c955d9243 (patch)
tree       e23d53a0fb73251f7416993e4d3a7241e533e79e /examples
parent     7390e05561a5c49306ebbf2eb2dcb1848546f201 (diff)
download   ComputeLibrary-d8734b55d89f05901ba9a75349761a9c955d9243.tar.gz
COMPMID-793: Add graph intermediate representation

Change-Id: Ic1685de4e19e0ac79669ef2da64e1dc96c7ea0bf
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/115248
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'examples')
-rw-r--r--  examples/SConscript                 |   8
-rw-r--r--  examples/graph_googlenet.cpp        |  34
-rw-r--r--  examples/graph_inception_v3.cpp     | 263
-rw-r--r--  examples/graph_inception_v4.cpp     | 184
-rw-r--r--  examples/graph_lenet.cpp            |  22
-rw-r--r--  examples/graph_mobilenet.cpp        |  41
-rw-r--r--  examples/graph_resnet50.cpp         |  41
-rw-r--r--  examples/graph_squeezenet.cpp       |  32
-rw-r--r--  examples/graph_squeezenet_v1_1.cpp  |  29
-rw-r--r--  examples/graph_vgg16.cpp            |  31
-rw-r--r--  examples/graph_vgg19.cpp            |  25
11 files changed, 378 insertions(+), 332 deletions(-)
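In summary, the commit ports every graph example from the v1 API (Graph, raw Tensor nodes, graph_init()) to the new graph2 frontend (a named Stream, explicit InputLayer/OutputLayer nodes, and a finalize() step). A minimal sketch of the resulting pattern, distilled from the hunks below; the stream name, tensor shape and accessor paths are illustrative placeholders, not part of the commit:

    #include "arm_compute/graph2.h"
    #include "utils/GraphUtils.h"
    #include "utils/Utils.h"

    using namespace arm_compute::utils;
    using namespace arm_compute::graph2::frontend;
    using namespace arm_compute::graph_utils;

    // Streams now carry an explicit id and name.
    Stream graph{ 0, "Example" };

    void setup_example(int target) // 0: NEON, 1: OpenCL, 2: OpenCL with tuner
    {
        Target target_hint              = set_target_hint2(target);
        bool   enable_tuning            = (target == 2);
        bool   enable_memory_management = true;

        graph << target_hint
              // v1: << Tensor(TensorInfo(TensorShape(...), 1, DataType::F32), accessor)
              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
                            get_input_accessor("" /* image path */))
              << SoftmaxLayer()
              // v1: << Tensor(get_output_accessor(label, 5))
              << OutputLayer(get_output_accessor("" /* labels path */, 5));

        // v1: graph.graph_init(target == 2). Tuning and memory management
        // are now explicit arguments of a single finalize() call.
        graph.finalize(target_hint, enable_tuning, enable_memory_management);
    }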
diff --git a/examples/SConscript b/examples/SConscript
index 9be9fa9d9a..80bce57316 100644
--- a/examples/SConscript
+++ b/examples/SConscript
@@ -57,15 +57,17 @@ if env['opencl'] and env['neon']:
alias = examples_env.Alias(example, prog)
Default(alias)
if env['os'] == 'android':
+ Import('arm_compute_graph2_a')
Import('arm_compute_graph_a')
Import('arm_compute_core_a')
Import('arm_compute_a')
arm_compute_graph_libs = [ arm_compute_a, arm_compute_core_a, "OpenCL"]
- graph_dependency = arm_compute_graph_a
+ graph_dependency = [arm_compute_graph_a, arm_compute_graph2_a]
else:
+ Import('arm_compute_graph2_so')
Import('arm_compute_graph_so')
- arm_compute_graph_libs = ["arm_compute_graph", "arm_compute", "arm_compute_core"]
- graph_dependency = arm_compute_graph_so
+ arm_compute_graph_libs = ["arm_compute_graph2", "arm_compute_graph", "arm_compute", "arm_compute_core"]
+ graph_dependency = [arm_compute_graph_so, arm_compute_graph2_so]
graph_utils = examples_env.Object("../utils/GraphUtils.cpp")
for file in Glob("./graph_*.cpp"):
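Note that during the transition the examples link against both graph libraries: the new arm_compute_graph2 is imported alongside the existing arm_compute_graph on both the Android (static archive) and shared-library paths, so every graph_*.cpp example keeps building whichever API it uses.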
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index de4afa29ea..d64512bb96 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -21,9 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -32,7 +30,7 @@
#include <tuple>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement Googlenet's network using the Compute Library's graph API
@@ -54,9 +52,11 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
- ConvolutionMethodHint convolution_hint = ConvolutionMethodHint::GEMM;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ ConvolutionMethod convolution_hint = ConvolutionMethod::GEMM;
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -91,8 +91,8 @@ public:
}
graph << target_hint
- << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor)))
<< ConvolutionLayer(
7U, 7U, 64U,
get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv1/conv1_7x7_s2_w.npy"),
@@ -133,10 +133,10 @@ public:
get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_w.npy"),
get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_b.npy"))
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -145,7 +145,7 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "GoogleNet" };
BranchLayer get_inception_node(const std::string &data_path, std::string &&param_path,
unsigned int a_filt,
@@ -154,7 +154,7 @@ private:
unsigned int d_filt)
{
std::string total_path = "/cnn_data/googlenet_model/" + param_path + "/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, a_filt,
get_weights_accessor(data_path, total_path + "1x1_w.npy"),
@@ -162,7 +162,7 @@ private:
PadStrideInfo(1, 1, 0, 0))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
1U, 1U, std::get<0>(b_filters),
get_weights_accessor(data_path, total_path + "3x3_reduce_w.npy"),
@@ -176,7 +176,7 @@ private:
PadStrideInfo(1, 1, 1, 1))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
+ SubStream i_c(graph);
i_c << ConvolutionLayer(
1U, 1U, std::get<0>(c_filters),
get_weights_accessor(data_path, total_path + "5x5_reduce_w.npy"),
@@ -190,7 +190,7 @@ private:
PadStrideInfo(1, 1, 2, 2))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL)))
<< ConvolutionLayer(
1U, 1U, d_filt,
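The recurring change in this file: each inception branch, previously a free-standing SubGraph, is now a SubStream forked from the parent Stream, and the branches are merged back with a depth-concatenating BranchLayer. A hedged sketch of the shape of get_inception_node after the change; the filter count, pooling parameters and weight-file names are placeholders:

    // Minimal two-branch node in the graph2 frontend; "graph" is the parent Stream.
    BranchLayer make_node(Stream &graph, const std::string &data_path)
    {
        SubStream i_a(graph); // forks from the parent stream's current tail
        i_a << ConvolutionLayer(1U, 1U, 64U,
                                get_weights_accessor(data_path, "/node_w.npy" /* placeholder */),
                                get_weights_accessor(data_path, "/node_b.npy" /* placeholder */),
                                PadStrideInfo(1, 1, 0, 0))
            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        SubStream i_b(graph);
        i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL)));

        // The node's output is the depth-wise concatenation of its branches.
        return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b));
    }

The returned BranchLayer is then streamed into the parent stream like any other layer.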
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index a10037be89..9bb51bad44 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -21,9 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -32,15 +30,15 @@
#include <tuple>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement InceptionV3's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels )
+ * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] image, [optional] labels )
*/
-class InceptionV3Example final : public Example
+class InceptionV3Example : public Example
{
public:
void do_setup(int argc, char **argv) override
@@ -53,8 +51,10 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -88,8 +88,8 @@ public:
label = argv[4];
}
- graph << target_hint << Tensor(TensorInfo(TensorShape(299U, 299U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ graph << target_hint << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor), false))
<< ConvolutionLayer(3U, 3U, 32U,
get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_weights.npy"),
@@ -100,7 +100,8 @@ public:
"/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
"/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(3U, 3U, 32U,
get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_weights.npy"),
@@ -111,7 +112,8 @@ public:
"/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
"/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(3U, 3U, 64U,
get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_weights.npy"),
@@ -122,7 +124,8 @@ public:
"/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
"/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
@@ -135,7 +138,8 @@ public:
"/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
"/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(3U, 3U, 192U,
get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_weights.npy"),
@@ -146,7 +150,8 @@ public:
"/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
"/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
@@ -183,10 +188,10 @@ public:
"/cnn_data/inceptionv3_model/Logits_Conv2d_1c_1x1_biases.npy"),
PadStrideInfo(1, 1, 0, 0))
<< ReshapeLayer(TensorShape(1001U)) << SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
@@ -195,7 +200,7 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "InceptionV3" };
private:
BranchLayer get_inception_node_A(const std::string &data_path, std::string &&param_path,
@@ -216,7 +221,7 @@ private:
conv_id1 = "_1_0c_";
}
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, a_filt,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -227,9 +232,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
1U, 1U, std::get<0>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_weights.npy"),
@@ -240,7 +246,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
5U, 5U, std::get<1>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_weights.npy"),
@@ -251,9 +258,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
+ SubStream i_c(graph);
i_c << ConvolutionLayer(
1U, 1U, std::get<0>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -264,7 +272,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
3U, 3U, std::get<1>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
@@ -275,7 +284,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
3U, 3U, std::get<2>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_weights.npy"),
@@ -286,9 +296,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
<< ConvolutionLayer(
1U, 1U, d_filt,
@@ -300,7 +311,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
}
@@ -310,7 +322,7 @@ private:
std::tuple<unsigned int, unsigned int, unsigned int> b_filters)
{
std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
3U, 3U, a_filt,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_weights.npy"),
@@ -321,9 +333,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
1U, 1U, std::get<0>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -334,7 +347,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
3U, 3U, std::get<1>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_weights.npy"),
@@ -345,7 +359,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
3U, 3U, std::get<2>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_weights.npy"),
@@ -356,12 +371,11 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
- i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
- // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream i_c(graph);
+ i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
}
@@ -373,7 +387,7 @@ private:
unsigned int d_filt)
{
std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, a_filt,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -384,9 +398,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
1U, 1U, std::get<0>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -397,7 +412,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
7U, 1U, std::get<1>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"),
@@ -408,7 +424,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
1U, 7U, std::get<2>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"),
@@ -419,9 +436,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
+ SubStream i_c(graph);
i_c << ConvolutionLayer(
1U, 1U, std::get<0>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -432,7 +450,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
1U, 7U, std::get<1>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_weights.npy"),
@@ -443,7 +462,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
7U, 1U, std::get<2>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_weights.npy"),
@@ -454,7 +474,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
1U, 7U, std::get<3>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_weights.npy"),
@@ -465,7 +486,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
7U, 1U, std::get<4>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_weights.npy"),
@@ -476,9 +498,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
<< ConvolutionLayer(
1U, 1U, d_filt,
@@ -490,7 +513,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
}
@@ -500,7 +524,7 @@ private:
std::tuple<unsigned int, unsigned int, unsigned int, unsigned int> b_filters)
{
std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, std::get<0>(a_filters),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -511,7 +535,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
3U, 3U, std::get<1>(a_filters),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
@@ -522,9 +547,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
1U, 1U, std::get<0>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -535,7 +561,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
7U, 1U, std::get<1>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"),
@@ -546,7 +573,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
1U, 7U, std::get<2>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"),
@@ -557,7 +585,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
<< ConvolutionLayer(
3U, 3U, std::get<3>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_weights.npy"),
@@ -568,12 +597,11 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
- i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
- // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream i_c(graph);
+ i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
}
@@ -593,7 +621,7 @@ private:
}
std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, a_filt,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -604,9 +632,24 @@ private:
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+ SubStream i_b(graph);
+ i_b << ConvolutionLayer(
+ 1U, 1U, std::get<0>(b_filters),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
+ std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+ PadStrideInfo(1, 1, 0, 0))
+ << BatchNormalizationLayer(
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
+ get_random_accessor(1.f, 1.f),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b1;
+ SubStream i_b1(static_cast<IStream &>(i_b));
i_b1 << ConvolutionLayer(
3U, 1U, std::get<1>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
@@ -617,9 +660,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b2;
+ SubStream i_b2(static_cast<IStream &>(i_b));
i_b2 << ConvolutionLayer(
1U, 3U, std::get<2>(b_filters),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_weights.npy"),
@@ -630,23 +674,39 @@ private:
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
- i_b << ConvolutionLayer(
- 1U, 1U, std::get<0>(b_filters),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
+ // Merge b1 and b2
+ i_b << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+
+ SubStream i_c(graph);
+ i_c << ConvolutionLayer(
+ 1U, 1U, std::get<0>(c_filters),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
PadStrideInfo(1, 1, 0, 0))
<< BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
- << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ << ConvolutionLayer(
+ 3U, 3U, std::get<1>(c_filters),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
+ std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+ PadStrideInfo(1, 1, 1, 1))
+ << BatchNormalizationLayer(
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
+ get_random_accessor(1.f, 1.f),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c1;
+ SubStream i_c1(static_cast<IStream &>(i_c));
i_c1 << ConvolutionLayer(
3U, 1U, std::get<2>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_weights.npy"),
@@ -657,9 +717,10 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c2;
+ SubStream i_c2(static_cast<IStream &>(i_c));
i_c2 << ConvolutionLayer(
1U, 3U, std::get<3>(c_filters),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_weights.npy"),
@@ -670,34 +731,13 @@ private:
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
- i_c << ConvolutionLayer(
- 1U, 1U, std::get<0>(c_filters),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
- std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- PadStrideInfo(1, 1, 0, 0))
- << BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
- get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
- << ConvolutionLayer(
- 3U, 3U, std::get<1>(c_filters),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
- std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- PadStrideInfo(1, 1, 1, 1))
- << BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
- get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
- << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
+ // Merge i_c1 and i_c2
+ i_c << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
<< ConvolutionLayer(
1U, 1U, d_filt,
@@ -709,7 +749,8 @@ private:
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
}
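Two further patterns appear in this file. First, BatchNormalizationLayer no longer takes a trailing fused ActivationLayerInfo; the activation becomes a separate ActivationLayer node streamed after it. Second, a branch that itself splits (the 1x3/3x1 convolution pairs) now forks second-level SubStreams from the branch through a cast to IStream&, and the merged BranchLayer is streamed back into that same branch; this also retires the dummy LINEAR activation that single-node SubGraphs needed to run on CL. A sketch of the nested split, with placeholder filter counts and weight paths:

    // Hedged sketch of the nested split/merge used by the inception nodes above.
    void make_split_branch(Stream &graph, const std::string &data_path)
    {
        SubStream i_b(graph);
        i_b << ConvolutionLayer(1U, 1U, 384U,
                                get_weights_accessor(data_path, "/b_w.npy" /* placeholder */),
                                std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                PadStrideInfo(1, 1, 0, 0))
            << BatchNormalizationLayer(get_weights_accessor(data_path, "/b_mean.npy"),
                                       get_weights_accessor(data_path, "/b_var.npy"),
                                       get_random_accessor(1.f, 1.f),
                                       get_weights_accessor(data_path, "/b_beta.npy"),
                                       0.001f)
            // Activation is now its own node, not a BatchNormalizationLayer parameter.
            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        // Second-level forks hang off the branch itself, not the parent graph.
        SubStream i_b1(static_cast<IStream &>(i_b));
        i_b1 << ConvolutionLayer(3U, 1U, 256U,
                                 get_weights_accessor(data_path, "/b1_w.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                 PadStrideInfo(1, 1, 1, 0));

        SubStream i_b2(static_cast<IStream &>(i_b));
        i_b2 << ConvolutionLayer(1U, 3U, 256U,
                                 get_weights_accessor(data_path, "/b2_w.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                 PadStrideInfo(1, 1, 0, 1));

        // The merged pair is streamed back into the branch it forked from.
        i_b << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
    }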
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index f004b41fb0..d9f6156fb2 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -21,9 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -32,7 +30,7 @@
#include <tuple>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement InceptionV4's network using the Compute Library's graph API
@@ -52,9 +50,11 @@ public:
// Create a preprocessor object
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
- // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ // Set target. 0 (NEON), 1 (OpenCL). By default it is NEON
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -88,8 +88,8 @@ public:
label = argv[4];
}
- graph << target_hint << Tensor(TensorInfo(TensorShape(299U, 299U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ graph << target_hint << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor), false))
// Conv2d_1a_3x3
<< ConvolutionLayer(3U, 3U, 32U,
@@ -153,10 +153,10 @@ public:
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_weights.npy"),
get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_biases.npy"))
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
@@ -165,19 +165,17 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "InceptionV4" };
private:
BranchLayer get_mixed_3a(const std::string &data_path)
{
std::string total_path = "/cnn_data/inceptionv4_model/Mixed_3a_";
- SubGraph i_a;
- i_a << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
- // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream i_a(graph);
+ i_a << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(3U, 3U, 96U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_3x3_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -195,7 +193,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/Mixed_4a_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(1U, 1U, 64U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -215,7 +213,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(1U, 1U, 64U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -260,7 +258,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/Mixed_5a_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(3U, 3U, 192U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -271,10 +269,8 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
- i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
- // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream i_b(graph);
+ i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b));
}
@@ -283,7 +279,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(1U, 1U, 96U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -294,7 +290,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(1U, 1U, 64U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -314,7 +310,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
+ SubStream i_c(graph);
i_c << ConvolutionLayer(1U, 1U, 64U,
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -343,7 +339,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
<< ConvolutionLayer(1U, 1U, 96U,
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
@@ -362,7 +358,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/Mixed_6a_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(3U, 3U, 384U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -373,7 +369,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(1U, 1U, 192U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -402,10 +398,9 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
- i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
- // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream i_c(graph);
+ i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
+
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
}
@@ -413,7 +408,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(1U, 1U, 384U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -424,7 +419,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(1U, 1U, 192U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -453,7 +448,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
+ SubStream i_c(graph);
i_c << ConvolutionLayer(1U, 1U, 192U,
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -500,7 +495,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
<< ConvolutionLayer(1U, 1U, 128U,
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
@@ -519,7 +514,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/Mixed_7a_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(1U, 1U, 192U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -539,7 +534,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(1U, 1U, 256U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -577,10 +572,9 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
- i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
- // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream i_c(graph);
+ i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
+
return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
}
@@ -588,7 +582,7 @@ private:
{
std::string total_path = "/cnn_data/inceptionv4_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(1U, 1U, 256U,
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -599,35 +593,7 @@ private:
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b1;
- i_b1 << ConvolutionLayer(
- 3U, 1U, 256U,
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
- std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- PadStrideInfo(1, 1, 1, 0))
- << BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"),
- get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"),
- 0.001f)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
-
- SubGraph i_b2;
- i_b2 << ConvolutionLayer(
- 1U, 3U, 256U,
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_weights.npy"),
- std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- PadStrideInfo(1, 1, 0, 1))
- << BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_BatchNorm_moving_variance.npy"),
- get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_BatchNorm_beta.npy"),
- 0.001f)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
-
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
1U, 1U, 384U,
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -639,38 +605,40 @@ private:
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
0.001f)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
- << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c1;
- i_c1 << ConvolutionLayer(
+ SubStream i_b1(static_cast<IStream &>(i_b));
+ i_b1 << ConvolutionLayer(
3U, 1U, 256U,
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_weights.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
PadStrideInfo(1, 1, 1, 0))
<< BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_variance.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_beta.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"),
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c2;
- i_c2 << ConvolutionLayer(
+ SubStream i_b2(static_cast<IStream &>(i_b));
+ i_b2 << ConvolutionLayer(
1U, 3U, 256U,
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_weights.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
PadStrideInfo(1, 1, 0, 1))
<< BatchNormalizationLayer(
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_mean.npy"),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_variance.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_BatchNorm_moving_variance.npy"),
get_random_accessor(1.f, 1.f),
- get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_beta.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_BatchNorm_beta.npy"),
0.001f)
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_c;
+ // Merge b1 and b2
+ i_b << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+
+ SubStream i_c(graph);
i_c << ConvolutionLayer(
1U, 1U, 384U,
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -706,10 +674,40 @@ private:
get_random_accessor(1.f, 1.f),
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
0.001f)
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
- << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+ SubStream i_c1(static_cast<IStream &>(i_c));
+ i_c1 << ConvolutionLayer(
+ 3U, 1U, 256U,
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_weights.npy"),
+ std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+ PadStrideInfo(1, 1, 1, 0))
+ << BatchNormalizationLayer(
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_variance.npy"),
+ get_random_accessor(1.f, 1.f),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_beta.npy"),
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+ SubStream i_c2(static_cast<IStream &>(i_c));
+ i_c2 << ConvolutionLayer(
+ 1U, 3U, 256U,
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_weights.npy"),
+ std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+ PadStrideInfo(1, 1, 0, 1))
+ << BatchNormalizationLayer(
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_mean.npy"),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_variance.npy"),
+ get_random_accessor(1.f, 1.f),
+ get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_beta.npy"),
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+ // Merge i_c1 and i_c2
+ i_c << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
- SubGraph i_d;
+ SubStream i_d(graph);
i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
<< ConvolutionLayer(1U, 1U, 256U,
get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 61bc7bd3bf..e4b8effe5d 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
+
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -30,7 +30,7 @@
#include <cstdlib>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement LeNet's network using the Compute Library's graph API
@@ -47,8 +47,10 @@ public:
unsigned int batches = 4; /** Number of batches */
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -78,7 +80,7 @@ public:
//conv1 << pool1 << conv2 << pool2 << fc1 << act1 << fc2 << smx
graph << target_hint
- << Tensor(TensorInfo(TensorShape(28U, 28U, 1U, batches), 1, DataType::F32), DummyAccessor())
+ << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), DataType::F32), get_input_accessor(""))
<< ConvolutionLayer(
5U, 5U, 20U,
get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_w.npy"),
@@ -101,10 +103,10 @@ public:
get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_w.npy"),
get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_b.npy"))
<< SoftmaxLayer()
- << Tensor(DummyAccessor(0));
+ << OutputLayer(get_output_accessor(""));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -113,7 +115,7 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "LeNet" };
};
/** Main program for LeNet
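
The LeNet port is the smallest end-to-end illustration of the new lifecycle: describe the network on a Stream, then finalize() it once — which subsumes the old graph_init() call — and run() it from do_run(). A stripped-down skeleton of that lifecycle with placeholder weights instead of the trained .npy files; the Example base class and run_example<> harness are assumed to match what the other examples use:

    #include "arm_compute/graph2.h"
    #include "utils/GraphUtils.h"
    #include "utils/Utils.h"

    #include <cstdlib>

    using namespace arm_compute::utils;
    using namespace arm_compute::graph2::frontend;
    using namespace arm_compute::graph_utils;

    class MinimalGraphExample : public Example
    {
    public:
        void do_setup(int argc, char **argv) override
        {
            const int target      = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
            Target    target_hint = set_target_hint2(target);

            graph << target_hint
                  << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, 1U), DataType::F32),
                                get_input_accessor(""))
                  << ConvolutionLayer(5U, 5U, 20U,
                                      get_random_accessor(1.f, 1.f), // placeholder weights
                                      std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                      PadStrideInfo(1, 1, 0, 0))
                  << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)))
                  << SoftmaxLayer()
                  << OutputLayer(get_output_accessor(""));

            // Tuning only makes sense for target 2 (OpenCL with Tuner)
            graph.finalize(target_hint, target == 2, true /* memory management */);
        }
        void do_run() override
        {
            graph.run();
        }

    private:
        Stream graph{ 0, "Minimal" };
    };

    int main(int argc, char **argv)
    {
        return run_example<MinimalGraphExample>(argc, argv); // assumed harness signature
    }
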
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 1a930dd950..4d01055c50 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -21,8 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -30,7 +29,7 @@
#include <cstdlib>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement MobileNet's network using the Compute Library's graph API
@@ -51,9 +50,12 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
- ConvolutionMethodHint convolution_hint = ConvolutionMethodHint::GEMM;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ ConvolutionMethod convolution_hint = ConvolutionMethod::GEMM;
+ DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::OPTIMIZED_3x3;
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
@@ -109,8 +111,9 @@ public:
graph << target_hint
<< convolution_hint
- << Tensor(TensorInfo(TensorShape(spatial_size, spatial_size, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false))
+ << depthwise_convolution_hint
+ << InputLayer(TensorDescriptor(TensorShape(spatial_size, spatial_size, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor), false))
<< ConvolutionLayer(
3U, 3U, 32U * depth_scale,
get_weights_accessor(data_path, "Conv2d_0_weights.npy"),
@@ -121,7 +124,8 @@ public:
get_weights_accessor(data_path, "Conv2d_0_BatchNorm_moving_variance.npy"),
get_weights_accessor(data_path, "Conv2d_0_BatchNorm_gamma.npy"),
get_weights_accessor(data_path, "Conv2d_0_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
<< get_dwsc_node(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0))
<< get_dwsc_node(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0))
<< get_dwsc_node(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0))
@@ -143,10 +147,10 @@ public:
PadStrideInfo(1, 1, 0, 0))
<< ReshapeLayer(TensorShape(1001U))
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -155,26 +159,26 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "MobileNetV1" };
BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
unsigned int conv_filt,
PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
{
std::string total_path = param_path + "_";
- SubGraph sg;
+ SubStream sg(graph);
sg << DepthwiseConvolutionLayer(
3U, 3U,
get_weights_accessor(data_path, total_path + "depthwise_depthwise_weights.npy"),
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
- dwc_pad_stride_info,
- true)
+ dwc_pad_stride_info)
<< BatchNormalizationLayer(
get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_mean.npy"),
get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_variance.npy"),
get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_gamma.npy"),
get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
<< ConvolutionLayer(
1U, 1U, conv_filt,
get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
@@ -185,7 +189,8 @@ private:
get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_moving_variance.npy"),
get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_gamma.npy"),
get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_beta.npy"),
- 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
+ 0.001f)
+ << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
return BranchLayer(std::move(sg));
}
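
get_dwsc_node shows how graph2 turns a repeated block into a helper: build the block on a SubStream of the main graph and hand it back as a single-branch BranchLayer, which splices straight into the stream. It also shows that BatchNormalizationLayer no longer takes a fused-activation argument; the bounded ReLU6 is now an explicit ActivationLayer. A condensed sketch of the same helper with the batch-norm stages dropped and hypothetical weight-file names:

    #include "arm_compute/graph2.h"
    #include "utils/GraphUtils.h"

    #include <string>

    using namespace arm_compute::graph2::frontend;
    using namespace arm_compute::graph_utils;

    // Depthwise-separable block: 3x3 depthwise conv + 1x1 pointwise conv,
    // each followed by an explicit bounded-ReLU6 activation.
    BranchLayer dwsc_block(Stream &graph, const std::string &data_path, const std::string &prefix,
                           unsigned int conv_filt,
                           PadStrideInfo dwc_info, PadStrideInfo conv_info)
    {
        SubStream sg(graph);
        sg << DepthwiseConvolutionLayer(3U, 3U,
                                        get_weights_accessor(data_path, prefix + "_dw_w.npy"), // hypothetical file
                                        std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                        dwc_info)
           << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
           << ConvolutionLayer(1U, 1U, conv_filt,
                               get_weights_accessor(data_path, prefix + "_pw_w.npy"), // hypothetical file
                               std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                               conv_info)
           << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
        return BranchLayer(std::move(sg)); // single branch: splices the substream back into the caller's stream
    }

The caller then chains it exactly as the example does: graph << dwsc_block(graph, data_path, "Conv2d_1", 64U, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0)) << ... .
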
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index e4d31f98d7..90debb4293 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -21,8 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -30,7 +29,7 @@
#include <cstdlib>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement Microsoft's ResNet50 network using the Compute Library's graph API
@@ -53,8 +52,10 @@ public:
false /* Do not convert to BGR */);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -89,8 +90,8 @@ public:
}
graph << target_hint
- << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
<< ConvolutionLayer(
7U, 7U, 64U,
get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_weights.npy"),
@@ -118,11 +119,12 @@ public:
PadStrideInfo(1, 1, 0, 0))
<< FlattenLayer()
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
+
void do_run() override
{
// Run graph
@@ -130,7 +132,7 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "ResNet50" };
void add_residual_block(const std::string &data_path, const std::string &name, unsigned int base_depth, unsigned int num_units, unsigned int stride)
{
@@ -147,7 +149,7 @@ private:
middle_stride = stride;
}
- SubGraph right;
+ SubStream right(graph);
right << ConvolutionLayer(
1U, 1U, base_depth,
get_weights_accessor(data_path, unit_name + "conv1_weights.npy"),
@@ -188,7 +190,7 @@ private:
if(i == 0)
{
- SubGraph left;
+ SubStream left(graph);
left << ConvolutionLayer(
1U, 1U, base_depth * 4,
get_weights_accessor(data_path, unit_name + "shortcut_weights.npy"),
@@ -201,20 +203,19 @@ private:
get_weights_accessor(data_path, unit_name + "shortcut_BatchNorm_beta.npy"),
0.0000100099996416f);
- graph << ResidualLayer(std::move(left), std::move(right));
+ graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right));
}
else if(middle_stride > 1)
{
- SubGraph left;
- left << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, PadStrideInfo(middle_stride, middle_stride, 0, 0), true))
- // TODO (alegil01) : Remove once we understand why a single node graph does not run in CL
- << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+ SubStream left(graph);
+ left << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, PadStrideInfo(middle_stride, middle_stride, 0, 0), true));
- graph << ResidualLayer(std::move(left), std::move(right));
+ graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right));
}
else
{
- graph << ResidualLayer(std::move(right));
+ SubStream left(graph);
+ graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right));
}
graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
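
The ResNet50 changes retire the old ResidualLayer in favour of the same two-branch BranchLayer used for concatenation, just with BranchMergeMethod::ADD; an empty SubStream now stands in for the identity shortcut, which also removes the single-node linear-activation workaround noted in the deleted TODO. A minimal sketch of a residual connection in this style, with placeholder weights; it assumes the incoming tensor already has 64 channels so the element-wise add is shape-compatible:

    #include "arm_compute/graph2.h"
    #include "utils/GraphUtils.h"

    using namespace arm_compute::graph2::frontend;
    using namespace arm_compute::graph_utils;

    // Appends y = ReLU(x + F(x)) to the stream, where F is a single 3x3 convolution.
    void add_identity_residual(Stream &graph)
    {
        SubStream left(graph); // no layers added: carries x through as the identity shortcut

        SubStream right(graph);
        right << ConvolutionLayer(3U, 3U, 64U,
                                  get_random_accessor(1.f, 1.f), // placeholder weights
                                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                  PadStrideInfo(1, 1, 1, 1));

        graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right))
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    }
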
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index d0c823a11c..b4e00a451b 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -21,9 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -32,14 +30,10 @@
#include <tuple>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
using namespace arm_compute::logging;
-namespace
-{
-} // namespace
-
/** Example demonstrating how to implement Squeezenet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
@@ -59,8 +53,10 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -95,8 +91,8 @@ public:
}
graph << target_hint
- << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor)))
<< ConvolutionLayer(
7U, 7U, 96U,
get_weights_accessor(data_path, "/cnn_data/squeezenet_v1.0_model/conv1_w.npy"),
@@ -171,10 +167,10 @@ public:
<< PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -183,12 +179,12 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "SqueezeNetV1" };
BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
{
std::string total_path = "/cnn_data/squeezenet_v1.0_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, expand1_filt,
get_weights_accessor(data_path, total_path + "expand1x1_w.npy"),
@@ -196,7 +192,7 @@ private:
PadStrideInfo(1, 1, 0, 0))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
3U, 3U, expand3_filt,
get_weights_accessor(data_path, total_path + "expand3x3_w.npy"),
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index 189cc027fd..4ebfd3fe8e 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -21,9 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -32,9 +30,8 @@
#include <tuple>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
-using namespace arm_compute::logging;
namespace
{
@@ -59,8 +56,10 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -95,8 +94,8 @@ public:
}
graph << target_hint
- << Tensor(TensorInfo(TensorShape(227U, 227U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor)))
<< ConvolutionLayer(
3U, 3U, 64U,
get_weights_accessor(data_path, "/cnn_data/squeezenet_v1_1_model/conv1_w.npy"),
@@ -171,10 +170,10 @@ public:
<< PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -183,12 +182,12 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "SqueezeNetV1.1" };
BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
{
std::string total_path = "/cnn_data/squeezenet_v1_1_model/" + param_path + "_";
- SubGraph i_a;
+ SubStream i_a(graph);
i_a << ConvolutionLayer(
1U, 1U, expand1_filt,
get_weights_accessor(data_path, total_path + "expand1x1_w.npy"),
@@ -196,7 +195,7 @@ private:
PadStrideInfo(1, 1, 0, 0))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
- SubGraph i_b;
+ SubStream i_b(graph);
i_b << ConvolutionLayer(
3U, 3U, expand3_filt,
get_weights_accessor(data_path, total_path + "expand3x3_w.npy"),
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index c8cc5b2558..faaf579047 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -21,8 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -30,7 +29,7 @@
#include <cstdlib>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
namespace
@@ -41,9 +40,9 @@ namespace
*
* @return The convolution layer hint
*/
-ConvolutionMethodHint convolution_hint_vgg16(size_t size_in_bytes)
+ConvolutionMethod convolution_hint_vgg16(size_t size_in_bytes)
{
- return ((get_mem_free_from_meminfo() * 1024) >= size_in_bytes) ? ConvolutionMethodHint::GEMM : ConvolutionMethodHint::DIRECT;
+ return ((get_mem_free_from_meminfo() * 1024) >= size_in_bytes) ? ConvolutionMethod::GEMM : ConvolutionMethod::DIRECT;
}
} // namespace
@@ -66,12 +65,14 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Check if we can use GEMM-based convolutions by evaluating whether the platform has at least 1.8 GB of available memory
- const size_t memory_required = 1932735283L;
- ConvolutionMethodHint convolution_hint = convolution_hint_vgg16(memory_required);
+ const size_t memory_required = 1932735283L;
+ ConvolutionMethod convolution_hint = convolution_hint_vgg16(memory_required);
// Parse arguments
if(argc < 2)
@@ -107,8 +108,8 @@ public:
graph << target_hint
<< convolution_hint
- << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -224,10 +225,10 @@ public:
get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc8_b.npy"))
// Softmax
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -236,7 +237,7 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "VGG16" };
};
/** Main program for VGG16
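
convolution_hint_vgg16 is the one place where a hint is chosen at runtime: GEMM-based convolution is memory-hungry at VGG16's sizes, so the example falls back to DIRECT when the platform has less than roughly 1.8 GB free. The same decision as a generic helper; the threshold is the caller's own estimate, not something the library computes:

    #include "arm_compute/graph2.h"
    #include "utils/GraphUtils.h"
    #include "utils/Utils.h"

    using namespace arm_compute::utils;
    using namespace arm_compute::graph2::frontend;

    // Pick a convolution method from the amount of free system memory.
    // get_mem_free_from_meminfo() reports kilobytes, hence the * 1024.
    ConvolutionMethod pick_convolution_method(size_t required_bytes)
    {
        return ((get_mem_free_from_meminfo() * 1024) >= required_bytes) ? ConvolutionMethod::GEMM
                                                                        : ConvolutionMethod::DIRECT;
    }

    // Usage: stream the hint before the layers it should affect, e.g.
    //   graph << target_hint << pick_convolution_method(1932735283UL /* ~1.8 GB */) << ...;
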
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 69ae23d87c..55502e0e00 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -21,8 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"
@@ -30,7 +29,7 @@
#include <cstdlib>
using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
using namespace arm_compute::graph_utils;
/** Example demonstrating how to implement VGG19's network using the Compute Library's graph API
@@ -52,9 +51,11 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- TargetHint target_hint = set_target_hint(int_target_hint);
- ConvolutionMethodHint convolution_hint = ConvolutionMethodHint::DIRECT;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ ConvolutionMethod convolution_hint = ConvolutionMethod::DIRECT;
+ bool enable_tuning = (target == 2);
+ bool enable_memory_management = true;
// Parse arguments
if(argc < 2)
@@ -90,8 +91,8 @@ public:
graph << target_hint
<< convolution_hint
- << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
- get_input_accessor(image, std::move(preprocessor)))
+ << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+ get_input_accessor(image, std::move(preprocessor)))
// Layer 1
<< ConvolutionLayer(
3U, 3U, 64U,
@@ -217,10 +218,10 @@ public:
get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc8_b.npy"))
// Softmax
<< SoftmaxLayer()
- << Tensor(get_output_accessor(label, 5));
+ << OutputLayer(get_output_accessor(label, 5));
- // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
- graph.graph_init(int_target_hint == 2);
+ // Finalize graph
+ graph.finalize(target_hint, enable_tuning, enable_memory_management);
}
void do_run() override
{
@@ -229,7 +230,7 @@ public:
}
private:
- Graph graph{};
+ Stream graph{ 0, "VGG19" };
};
/** Main program for VGG19
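
Taken together, the ports follow one recipe, which the VGG19 diff shows in its purest form: swap the graph/Nodes includes for the single arm_compute/graph2.h umbrella header, move to the graph2::frontend namespace, declare endpoints as InputLayer/OutputLayer, stream hints (Target, ConvolutionMethod, ...) before the layers they should influence — as every example here does — and replace graph_init() with one finalize() call. A compact sketch of that hint ordering, with placeholder weights:

    #include "arm_compute/graph2.h"
    #include "utils/GraphUtils.h"

    using namespace arm_compute::graph2::frontend;
    using namespace arm_compute::graph_utils;

    int main()
    {
        Stream graph{ 0, "HintOrderDemo" };
        Target target_hint = set_target_hint2(0); // 0: NEON

        graph << target_hint
              << ConvolutionMethod::DIRECT // applies to the convolutions streamed after it
              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
                            get_input_accessor(""))
              << ConvolutionLayer(3U, 3U, 64U,
                                  get_random_accessor(1.f, 1.f), // placeholder weights
                                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                                  PadStrideInfo(1, 1, 1, 1))
              << SoftmaxLayer()
              << OutputLayer(get_output_accessor(""));

        graph.finalize(target_hint, false /* tuning */, true /* memory management */);
        graph.run();
        return 0;
    }
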