path: root/examples/graph_vgg16.cpp
author     Gian Marco Iodice <gianmarco.iodice@arm.com>   2018-03-21 17:45:31 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:49:16 +0000
commit     ed99f411d52949720a4d64d91664cd71e46b79d5 (patch)
tree       d903b523dea830aeb48d59a66b8da59e4dcf707a /examples/graph_vgg16.cpp
parent     6528aa20e768f2d801328aa164d672b7fdfe266f (diff)
download   ComputeLibrary-ed99f411d52949720a4d64d91664cd71e46b79d5.tar.gz
COMPMID-1018 - Add Winograd support in VGG16 and Alexnet examples
Change-Id: I4a2deee9e4b2c54ea79d2895cfeca44190133b24
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125453
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'examples/graph_vgg16.cpp')
-rw-r--r--  examples/graph_vgg16.cpp | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
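
For readers skimming the diff below: with this patch, an OpenCL target (Target::CL) streams a DIRECT hint for the first 3x3 convolution and a WINOGRAD hint for the remaining 3x3 convolutions, while other targets keep the GEMM hint chosen by convolution_hint_vgg16() from the ~1.8 GB memory check mentioned in the code comment. The stand-alone C++ sketch below mirrors that selection; the Target and ConvolutionMethod enums are local stand-ins for the ACL graph types, and convolution_hint_vgg16_sketch() is a hypothetical reduction of the real helper, so treat it as an illustration rather than the library implementation.

#include <cstddef>

// Local stand-ins for the ACL graph enums, for illustration only.
enum class Target { NEON, CL };
enum class ConvolutionMethod { GEMM, DIRECT, WINOGRAD };

// Hypothetical reduction of convolution_hint_vgg16(): prefer GEMM-based
// convolutions when roughly 1.8 GB is available, otherwise fall back to
// direct convolution. The real helper queries system memory itself;
// free_memory is passed in here to keep the sketch self-contained.
ConvolutionMethod convolution_hint_vgg16_sketch(std::size_t free_memory, std::size_t memory_required)
{
    return (free_memory >= memory_required) ? ConvolutionMethod::GEMM : ConvolutionMethod::DIRECT;
}

// Target-dependent hints as introduced by this patch: DIRECT for the first
// 3x3 convolution and WINOGRAD for the rest on OpenCL, GEMM (or DIRECT when
// memory is short) elsewhere.
struct Vgg16Hints
{
    ConvolutionMethod first_conv3x3;
    ConvolutionMethod conv3x3;
};

Vgg16Hints select_vgg16_hints(Target target_hint, std::size_t free_memory)
{
    const std::size_t memory_required = 1932735283UL; // ~1.8 GB
    const bool        is_opencl       = target_hint == Target::CL;
    return { is_opencl ? ConvolutionMethod::DIRECT : ConvolutionMethod::GEMM,
             is_opencl ? ConvolutionMethod::WINOGRAD : convolution_hint_vgg16_sketch(free_memory, memory_required) };
}
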
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index faaf579047..516b7b18f0 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -71,8 +71,10 @@ public:
bool enable_memory_management = true;
// Check if we can use GEMM-based convolutions evaluating if the platform has at least 1.8 GB of available memory
- const size_t memory_required = 1932735283L;
- ConvolutionMethod convolution_hint = convolution_hint_vgg16(memory_required);
+ const size_t memory_required = 1932735283L;
+ const bool is_opencl = target_hint == Target::CL;
+ ConvolutionMethod first_convolution3x3_hint = is_opencl ? ConvolutionMethod::DIRECT : ConvolutionMethod::GEMM;
+ ConvolutionMethod convolution3x3_hint = is_opencl ? ConvolutionMethod::WINOGRAD : convolution_hint_vgg16(memory_required);
// Parse arguments
if(argc < 2)
@@ -107,7 +109,7 @@ public:
}
graph << target_hint
- << convolution_hint
+ << first_convolution3x3_hint
<< InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
get_input_accessor(image, std::move(preprocessor)))
// Layer 1
@@ -117,6 +119,7 @@ public:
get_weights_accessor(data_path, "/cnn_data/vgg16_model/conv1_1_b.npy"),
PadStrideInfo(1, 1, 1, 1))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+ << convolution3x3_hint
// Layer 2
<< ConvolutionLayer(
3U, 3U, 64U,
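
Note on the hunks above: in the ACL graph frontend a streamed ConvolutionMethod hint applies to the convolution nodes added after it until another hint is streamed in, which is why the patch places first_convolution3x3_hint before conv1_1 and switches to convolution3x3_hint just before conv1_2. The toy builder below only assumes that scoping behaviour and is not the ACL API; it simply shows the pattern.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

enum class ConvolutionMethod { GEMM, DIRECT, WINOGRAD };

// Toy graph builder: each "layer" records the hint that was in effect when
// it was streamed in, mimicking how the hints scope in the example above.
struct ToyGraph
{
    ConvolutionMethod current_hint = ConvolutionMethod::GEMM;
    std::vector<std::pair<std::string, ConvolutionMethod>> convs;

    ToyGraph &operator<<(ConvolutionMethod hint)
    {
        current_hint = hint; // later layers inherit this hint
        return *this;
    }
    ToyGraph &operator<<(const std::string &conv_name)
    {
        convs.emplace_back(conv_name, current_hint);
        return *this;
    }
};

int main()
{
    ToyGraph graph;
    graph << ConvolutionMethod::DIRECT   << std::string("conv1_1")   // first 3x3 convolution
          << ConvolutionMethod::WINOGRAD << std::string("conv1_2");  // all following 3x3 convolutions
    for(const auto &conv : graph.convs)
    {
        std::cout << conv.first << " uses hint " << static_cast<int>(conv.second) << "\n";
    }
    return 0;
}
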